from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(f"{solution() = }")
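# Usage sketch (added for illustration, not part of the original file): a few
# spot checks for the helpers above. `solution()` answers Project Euler #7,
# whose expected result is the 10001st prime, 104743.
if __name__ == "__main__":
    assert is_prime(13) and not is_prime(15)
    assert solution(1) == 2
    assert solution(6) == 13  # the sixth prime: 2, 3, 5, 7, 11, 13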
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
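# Usage sketch (illustrative, not from the original file): configs round-trip
# through JSON via the standard PretrainedConfig API; the values are arbitrary.
#
#     config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
#     config.save_pretrained("./tiny-albert")            # writes ./tiny-albert/config.json
#     reloaded = AlbertConfig.from_pretrained("./tiny-albert")
#     assert reloaded.hidden_size == 768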
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
import inspect
import unittest

from huggingface_hub import hf_hub_download

from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import UperNetForSemanticSegmentation
    from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of ADE20k
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
def permute(nums: list) -> list:
    """Return all permutations by repeatedly rotating the list and recursing."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list) -> list:
    """Return all permutations via in-place swapping and backtracking."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
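# Usage sketch (added for illustration): both functions enumerate all n!
# orderings; `permute` rebuilds lists recursively while `permute2` swaps in
# place, so it allocates less intermediate storage.
if __name__ == "__main__":
    assert sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3]))
    assert len(permute2([1, 2, 3, 4])) == 24  # 4! orderings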
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed scheduled (daily) CI workflow run."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the artifacts of the last completed daily CI workflow run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` matches the (misspelled) keyword in the helper's signature.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download and extract the contents of the last daily CI artifacts."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
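# Usage sketch (illustrative; the artifact name below is hypothetical):
if __name__ == "__main__":
    reports = get_last_daily_ci_reports(
        artifact_names=["prev_ci_results"],  # hypothetical artifact name
        output_dir="./ci_reports",
        token=os.environ.get("GITHUB_TOKEN"),
    )
    print(list(reports))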
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
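# Usage note (illustrative, not from the original file): with `_LazyModule`,
# importing the package is cheap; each submodule is imported on first
# attribute access, e.g.
#
#     from transformers import XGLMConfig   # resolves configuration_xglm lazily
#     config = XGLMConfig()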
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
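# Usage sketch (illustrative; the script and file names are placeholders):
#
#     python calculate_rouge_path.py preds.txt targets.txt --save_path metrics.json
#
# or programmatically:
#
#     metrics = calculate_rouge_path("preds.txt", "targets.txt")
#     print(metrics)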
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
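# Usage note (illustrative): this dummy-object pattern keeps package imports
# working when the optional `note_seq` backend is missing; only instantiation
# fails, with an actionable message instead of an import-time crash, e.g.
#
#     try:
#         MidiProcessor()
#     except ImportError as err:
#         print(err)  # tells the user to install the `note_seq` backend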
from typing import List, Optional, Union

import numpy as np

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
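# Usage sketch (illustrative; inside a transformers checkout this class is
# normally reached via `AutoFeatureExtractor` rather than run standalone):
#
#     import numpy as np
#     extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
#     audio = np.zeros(24000, dtype=np.float32)           # 1 s of mono silence
#     feats = extractor(audio, sampling_rate=24000, return_tensors="np")
#     print(feats["input_values"].shape)                  # (1, 1, 24000)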
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def release_memory(*objects):
    """Set each object to None, then collect garbage and empty the device cache."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Check whether `exception` is a CUDA/CPU out-of-memory-style failure."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """Decorator that retries `function` with a halved batch size after OOM-style failures."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
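# Usage sketch (illustrative): the decorated function receives the batch size
# as its first argument and is retried with half the size after OOM failures.
#
#     @find_executable_batch_size(starting_batch_size=64)
#     def train(batch_size):
#         ...  # build dataloaders and run a training step
#
#     train()   # note: called WITHOUT the batch size argument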
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
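# Worked example (added for illustration): a 10 mH inductor with a 1 uF
# capacitor resonates at f = 1 / (2*pi*sqrt(L*C)) = 1 / (2*pi*sqrt(1e-8))
# which is approximately 1591.5 Hz.
if __name__ == "__main__":
    label, freq = resonant_frequency(inductance=10e-3, capacitance=1e-6)
    print(label, f"{freq:.1f} Hz")  # Resonant frequency 1591.5 Hz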
"""Evaluation script for RAG models."""

import argparse
import ast
import logging
import os
import sys

import pandas as pd
import torch
from tqdm import tqdm

from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging


sys.path.append(os.path.join(os.getcwd()))  # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

transformers_logging.set_verbosity_info()


def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")


def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")


def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings


def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path",
        default=None,
        type=str,
        help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument(
        "--eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument(
        "--num_beams",
        default=4,
        type=int,
        help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retried while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args


def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)


if __name__ == "__main__":
    args = get_args()
    main(args)
import unittest

from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate


# Fake function we will use as a tool
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase_ = logging.getLogger()
def __SCREAMING_SNAKE_CASE ():
snake_case_ = argparse.ArgumentParser()
parser.add_argument('''-f''' )
snake_case_ = parser.parse_args()
return args.f
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = {}
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''all_results.json''' )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f:
snake_case_ = json.load(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(F'''can\'t find {path}''' )
return results
def __SCREAMING_SNAKE_CASE ():
snake_case_ = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
lowerCAmelCase_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class snake_case_ ( __A ):
'''simple docstring'''
@classmethod
def snake_case__( cls : Optional[int] ) ->List[Any]:
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
snake_case_ = tempfile.mkdtemp()
snake_case_ = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
snake_case_ = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case__( cls : Dict ) ->str:
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__( self : int ) ->Optional[int]:
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = f'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
snake_case_ = get_results(_UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__( self : Any ) ->int:
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = f'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
snake_case_ = get_results(_UpperCamelCase )
self.assertLess(result['''perplexity'''] , 1_0_0 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__( self : str ) ->Union[str, Any]:
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = f'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ = get_results(_UpperCamelCase )
self.assertLess(result['''perplexity'''] , 4_2 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__( self : Tuple ) ->Union[str, Any]:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
snake_case_ = 7 if get_gpu_count() > 1 else 2
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = f'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
result = get_results(tmp_dir )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def test_run_squad_no_trainer(self):
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
result = get_results(tmp_dir )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def test_run_swag_no_trainer(self):
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
result = get_results(tmp_dir )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def test_run_summarization_no_trainer(self):
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
result = get_results(tmp_dir )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def test_run_translation_no_trainer(self):
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
result = get_results(tmp_dir )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''translation_no_trainer''' ) ) )
@slow
def test_run_semantic_segmentation_no_trainer(self):
stream_handler = logging.StreamHandler(sys.stdout )
logger.addHandler(stream_handler )
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
result = get_results(tmp_dir )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def test_run_image_classification_no_trainer(self):
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
result = get_results(tmp_dir )
# The base model scores a 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''image_classification_no_trainer''' ) ) ) | 8 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
'''simple docstring'''
def get_file_format(self, seed, shape):
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy'''
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
dtype = jnp.bfloat16 if fp16 else jnp.float32
image = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
return image
def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
dtype = jnp.bfloat16 if fp16 else jnp.float32
revision = '''bf16''' if fp16 else None
model, params = FlaxUNet2DConditionModel.from_pretrained(
model_id , subfolder='''unet''' , dtype=dtype , revision=revision )
return model, params
def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
dtype = jnp.bfloat16 if fp16 else jnp.float32
hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[1_7, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_0_0_0, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
model, params = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fp16=True )
latents = self.get_latents(seed , fp16=True )
encoder_hidden_states = self.get_encoder_hidden_states(seed , fp16=True )
sample = model.apply(
{'''params''': params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample
assert sample.shape == latents.shape
output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[1_7, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_0_0_0, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
model, params = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fp16=True )
latents = self.get_latents(seed , shape=(4, 4, 96, 96) , fp16=True )
encoder_hidden_states = self.get_encoder_hidden_states(seed , shape=(4, 77, 1024) , fp16=True )
sample = model.apply(
{'''params''': params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample
assert sample.shape == latents.shape
output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 ) | 8 | 1 |
def solution(n: int = 1000) -> int:
prev_numerator, prev_denominator = 1, 1
result = []
for i in range(1 , n + 1 ):
numerator = prev_numerator + 2 * prev_denominator
denominator = prev_numerator + prev_denominator
if len(str(numerator ) ) > len(str(denominator ) ):
result.append(i )
prev_numerator = numerator
prev_denominator = denominator
return len(result )
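# Hedged illustration of the expansion step above: each iteration maps the
# convergent n/d of sqrt(2) to (n + 2d)/(n + d), giving 3/2, 7/5, 17/12,
# 41/29, ...; the function counts how many of the first `n` convergents have
# a numerator with more digits than the denominator (1393/985 is the first).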
if __name__ == "__main__":
print(f"""{solution() = }""") | 8 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
if not isinstance(objects , list ):
objects = list(objects )
for i in range(len(objects ) ):
objects[i] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
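# Hedged usage sketch (calling pattern assumed from the function above):
# reassign the returned references so the caller's names stop pinning the
# tensors in device memory.
# a = torch.ones(1000, 1000).cuda()
# b = torch.ones(1000, 1000).cuda()
# a, b = release_memory(a, b)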
def should_reduce_batch_size(exception):
_statements = [
'''CUDA out of memory.''', # CUDA OOM
'''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU
'''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM
]
if isinstance(exception , RuntimeError ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
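# A hedged usage sketch for the `find_executable_batch_size` decorator defined
# below: the wrapped function must take `batch_size` as its first argument,
# and the call is retried with the batch size halved after each OOM error.
#
# @find_executable_batch_size(starting_batch_size=128)
# def train(batch_size):
#     ...  # build dataloaders with `batch_size` and run the loop
#
# train()  # retries with 64, 32, ... until it fits or reaches zero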
def find_executable_batch_size(function = None , starting_batch_size = 128 ):
if function is None:
return functools.partial(find_executable_batch_size , starting_batch_size=starting_batch_size )
batch_size = starting_batch_size
def decorator(*args , **kwargs ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
params = list(inspect.signature(function ).parameters.keys() )
# Guard against user error
if len(params ) < (len(args ) + 1):
arg_str = ''', '''.join([F'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
F'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
F'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError('''No executable batch size found, reached zero.''' )
try:
return function(batch_size , *args , **kwargs )
except Exception as e:
if should_reduce_batch_size(e ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator | 8 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''feature_extraction_vit'''] = ['''ViTFeatureExtractor''']
_import_structure['''image_processing_vit'''] = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_vit'''] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_vit'''] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_vit'''] = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 8 |
from __future__ import annotations
def encode(plain: str) -> list[int]:
return [ord(elem ) - 96 for elem in plain]
def decode(encoded: list[int]) -> str:
return "".join(chr(elem + 96 ) for elem in encoded )
def main() -> None:
encoded = encode(input('''-> ''' ).strip().lower() )
print('''Encoded: ''' , encoded )
print('''Decoded:''' , decode(encoded ) )
if __name__ == "__main__":
main() | 8 | 1 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
lowerCAmelCase_ = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class ErnieMConfig(PretrainedConfig):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = "ernie_m"
SCREAMING_SNAKE_CASE : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__(self, vocab_size: int = 250002, hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, max_position_embeddings: int = 514, initializer_range: float = 0.02, pad_token_id: int = 1, layer_norm_eps: float = 1e-05, classifier_dropout=None, is_decoder=False, act_dropout=0.0, **kwargs):
super().__init__(pad_token_id=pad_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.classifier_dropout = classifier_dropout
self.is_decoder = is_decoder
self.act_dropout = act_dropout | 8 |
import math
def res(x, y):
if 0 not in (x, y):
# We use the relation x^y = y*log10(x), where 10 is the base.
return y * math.log10(x )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError('''This should never happen''' )
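# Worked example (hedged illustration): comparing 2^10 with 10^2,
# res(2, 10) = 10 * log10(2) ≈ 3.01 while res(10, 2) = 2 * log10(10) = 2.0,
# so 2^10 (= 1024) is reported as the larger number.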
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
prompt = '''Enter the base and the power separated by a comma: '''
x1, y1 = map(int, input(prompt).split(''','''))
x2, y2 = map(int, input(prompt).split(''','''))
# We find the log of each number, using the function res(), which takes two
# arguments.
res1 = res(x1, y1)
res2 = res(x2, y2)
# We check for the largest number
if res1 > res2:
print('''Largest number is''', x1, '''^''', y1)
elif res2 > res1:
print('''Largest number is''', x2, '''^''', y2)
else:
print('''Both are equal''') | 8 | 1 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
Wav2Vec2Config,
Wav2Vec2FeatureExtractor,
Wav2Vec2ForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
_is_native_amp_available = True
from torch.cuda.amp import autocast
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class ModelArguments:
'''simple docstring'''
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
cache_dir: Optional[str] = field(
default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
freeze_feature_extractor: Optional[bool] = field(
default=True , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
verbose_logging: Optional[bool] = field(
default=False , metadata={"help": "Whether to log verbose messages or not."} , )
max_gumbel_temperature: Optional[float] = field(
default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} )
min_gumbel_temperature: Optional[float] = field(
default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} )
gumbel_temperature_decay: Optional[float] = field(
default=0.999995 , metadata={"help": "Decay of gumbel temperature during training."} )
def configure_logger(model_args, training_args):
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logging_level = logging.WARNING
if model_args.verbose_logging:
logging_level = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
logging_level = logging.INFO
logger.setLevel(logging_level )
@dataclass
class DataTrainingArguments:
'''simple docstring'''
dataset_name: str = field(
default=None , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
dataset_config_name: Optional[str] = field(
default=None , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
train_split_name: Optional[str] = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
validation_split_name: Optional[str] = field(
default="validation" , metadata={
"help": (
"The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
speech_file_column: Optional[str] = field(
default="file" , metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"} , )
overwrite_cache: bool = field(
default=False , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
validation_split_percentage: Optional[int] = field(
default=1 , metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
} , )
preprocessing_num_workers: Optional[int] = field(
default=None , metadata={"help": "The number of processes to use for the preprocessing."} , )
max_duration_in_seconds: Optional[float] = field(
default=20.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
'''simple docstring'''
model: Wav2Vec2ForPreTraining
feature_extractor: Wav2Vec2FeatureExtractor
padding: Union[bool, str] = "longest"
pad_to_multiple_of: Optional[int] = None
max_length: Optional[int] = None
def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
# reformat list to dict and set to pytorch format
batch = self.feature_extractor.pad(
features , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1] )
batch_size = batch['''input_values'''].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
output_lengths = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1 ) ).to(
torch.long )
attention_mask = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['''input_values'''].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
attention_mask[(torch.arange(attention_mask.shape[0] , device=attention_mask.device ), output_lengths - 1)] = 1
attention_mask = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
batch['''mask_time_indices'''] = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=attention_mask , min_masks=2 , )
return batch
class Wav2Vec2PreTrainer(Trainer):
'''simple docstring'''
def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
super().__init__(*args , **kwargs )
self.num_update_step = 0
self.max_gumbel_temp = max_gumbel_temp
self.min_gumbel_temp = min_gumbel_temp
self.gumbel_temp_decay = gumbel_temp_decay
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
model.train()
inputs = self._prepare_inputs(inputs )
if self.use_amp:
with autocast():
loss = self.compute_loss(model , inputs )
else:
loss = self.compute_loss(model , inputs )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
loss = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
loss = loss.sum() / (inputs['''mask_time_indices''']).sum()
else:
raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(loss ).backward()
elif self.use_apex:
with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(loss )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
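# Hedged worked example of the decay above: with the ModelArguments defaults
# (max 2.0, decay 0.999995, min 0.5), the temperature after k updates is
# max(2.0 * 0.999995**k, 0.5); it hits the 0.5 floor once 0.999995**k = 0.25,
# i.e. after roughly ln(4) / 5e-6 ≈ 277k update steps.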
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
configure_logger(model_args , training_args )
# Downloading and loading a dataset from the hub.
datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
datasets = DatasetDict()
datasets['''validation'''] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , )
datasets['''train'''] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
datasets = DatasetDict()
datasets['''validation'''] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split='''validation''' , cache_dir=model_args.cache_dir , )
datasets['''train'''] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=True )
def prepare_dataset(batch):
# check that all files have the correct sampling rate
batch['''speech'''], _ = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
vectorized_datasets = datasets.map(
prepare_dataset , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['''train'''].column_names )
# filter audio files that are too long
vectorized_datasets = vectorized_datasets.filter(
lambda data: len(data['''speech'''] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(batch):
return feature_extractor(batch['''speech'''] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
vectorized_datasets = vectorized_datasets.map(
normalize , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['''train'''].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
config = Wav2Vec2Config.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
'''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'''
''' ``config.feat_extract_norm=\'layer\'''' )
model = Wav2Vec2ForPreTraining(config )
data_collator = DataCollatorForWav2Vec2Pretraining(model=model , feature_extractor=feature_extractor )
trainer = Wav2Vec2PreTrainer(
model=model , data_collator=data_collator , args=training_args , train_dataset=vectorized_datasets['''train'''] , eval_dataset=vectorized_datasets['''validation'''] , tokenizer=feature_extractor , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main() | 8 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'''vocab_file''': '''spiece.model'''}
lowerCAmelCase_ = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
lowerCAmelCase_ = {
'''google/bigbird-roberta-base''': 40_96,
'''google/bigbird-roberta-large''': 40_96,
'''google/bigbird-base-trivia-itc''': 40_96,
}
class BigBirdTokenizer(PreTrainedTokenizer):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
prefix_tokens: List[int] = []
def __init__(self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sep_token=sep_token , mask_token=mask_token , cls_token=cls_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
self.vocab_file = vocab_file
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(vocab_file )
@property
def vocab_size(self):
return self.sp_model.get_piece_size()
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self):
state = self.__dict__.copy()
state['''sp_model'''] = None
return state
def __setstate__(self, d):
self.__dict__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _tokenize(self, text: str) -> List[str]:
return self.sp_model.encode(text , out_type=str )
def _convert_token_to_id(self, token):
return self.sp_model.piece_to_id(token )
def _convert_id_to_token(self, index):
token = self.sp_model.IdToPiece(index )
return token
def convert_tokens_to_string(self, tokens):
current_sub_tokens = []
out_string = ''''''
prev_is_special = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(current_sub_tokens ) + token
prev_is_special = True
current_sub_tokens = []
else:
current_sub_tokens.append(token )
prev_is_special = False
out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs) -> str:
self._decode_use_source_tokenizer = kwargs.pop('''use_source_tokenizer''' , False )
filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
sub_texts = []
current_sub_text = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
current_sub_text = []
sub_texts.append(token )
else:
current_sub_text.append(token )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
text = re.sub(R''' (\[(MASK|SEP)\])''' , R'''\1''' , ''' '''.join(sub_texts ) )
else:
text = ''''''.join(sub_texts )
clean_up_tokenization_spaces = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text )
return clean_text
else:
return text
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
elif not os.path.isfile(self.vocab_file ):
with open(out_vocab_file , '''wb''' ) as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model )
return (out_vocab_file,)
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
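# Hedged illustration of the layout above: a single sequence is wrapped as
# [CLS] A [SEP]; a pair of sequences becomes [CLS] A [SEP] B [SEP].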
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0 )) + [1]
return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] | 8 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
'''simple docstring'''
destination_vertex: int
weight: int
class AdjacencyList:
'''simple docstring'''
def __init__(self, size: int):
self._graph: list[list[Edge]] = [[] for _ in range(size )]
self._size = size
def __getitem__(self, vertex: int) -> Iterator[Edge]:
return iter(self._graph[vertex] )
@property
def size(self):
return self._size
def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(to_vertex , weight ) )
def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
queue = deque([start_vertex] )
distances: list[int | None] = [None] * self.size
distances[start_vertex] = 0
while queue:
current_vertex = queue.popleft()
current_distance = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
new_distance = current_distance + edge.weight
dest_vertex_distance = distances[edge.destination_vertex]
if (
isinstance(dest_vertex_distance , int )
and new_distance >= dest_vertex_distance
):
continue
distances[edge.destination_vertex] = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
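# Hedged usage sketch of the 0-1 BFS above: the direct edge 0->1 costs 1,
# but the detour 0->2->1 uses only 0-weight edges, so the answer is 0.
# g = AdjacencyList(3)
# g.add_edge(0, 1, 1); g.add_edge(0, 2, 0); g.add_edge(2, 1, 0)
# assert g.get_shortest_path(0, 1) == 0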
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 |
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
factor_map: dict[int, int] = {}
prime = 2
while True:
factor = factor_map.pop(prime , None )
if factor:
x = factor + prime
while x in factor_map:
x += factor
factor_map[x] = factor
else:
factor_map[prime * prime] = prime
yield prime
prime += 1
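# Hedged trace of the incremental sieve above: 2 is absent from factor_map,
# so 4 -> 2 is recorded and 2 is yielded; 3 records 9 -> 3 and is yielded;
# 4 pops factor 2 and slides it forward to 6; 5 records 25 -> 5 and is
# yielded. Only numbers never entered as composites reach the yield.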
def solution(limit: float = 1e10) -> int:
primes = sieve()
n = 1
while True:
prime = next(primes )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(primes )
n += 2
if __name__ == "__main__":
print(solution()) | 8 | 1 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
'''simple docstring'''
@property
def dummy_input(self):
return self.get_dummy_input()
@property
def output_shape(self):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def get_dummy_input(self, include_temb=True, include_res_hidden_states_tuple=False, include_encoder_hidden_states=False, include_skip_sample=False):
batch_size = 4
num_channels = 32
sizes = (32, 32)
generator = torch.manual_seed(0 )
device = torch.device(torch_device )
shape = (batch_size, num_channels) + sizes
hidden_states = randn_tensor(shape , generator=generator , device=device )
dummy_input = {'''hidden_states''': hidden_states}
if include_temb:
temb_channels = 128
dummy_input['''temb'''] = randn_tensor((batch_size, temb_channels) , generator=generator , device=device )
if include_res_hidden_states_tuple:
generator_1 = torch.manual_seed(1 )
dummy_input['''res_hidden_states_tuple'''] = (randn_tensor(shape , generator=generator_1 , device=device ),)
if include_encoder_hidden_states:
dummy_input['''encoder_hidden_states'''] = floats_tensor((batch_size, 32, 32) ).to(torch_device )
if include_skip_sample:
dummy_input['''skip_sample'''] = randn_tensor(((batch_size, 3) + sizes) , generator=generator , device=device )
return dummy_input
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
'''in_channels''': 32,
'''out_channels''': 32,
'''temb_channels''': 128,
}
if self.block_type == "up":
init_dict['''prev_output_channel'''] = 32
if self.block_type == "mid":
init_dict.pop('''out_channels''' )
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_output(self, expected_slice):
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
unet_block = self.block_class(**init_dict )
unet_block.to(torch_device )
unet_block.eval()
with torch.no_grad():
output = unet_block(**inputs_dict )
if isinstance(output , tuple ):
output = output[0]
self.assertEqual(output.shape , self.output_shape )
output_slice = output[0, -1, -3:, -3:]
expected_slice = torch.tensor(expected_slice ).to(torch_device )
assert torch_all_close(output_slice.flatten() , expected_slice , atol=5e-3 )
@unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' )
def test_training(self):
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
model = self.block_class(**init_dict )
model.to(torch_device )
model.train()
output = model(**inputs_dict )
if isinstance(output , tuple ):
output = output[0]
device = torch.device(torch_device )
noise = randn_tensor(output.shape , device=device )
loss = torch.nn.functional.mse_loss(output , noise )
loss.backward() | 8 |
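# A hedged sketch (class and import names assumed, not part of this file) of
# how the mixin above is typically consumed by a concrete block test:
#
# class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#     block_class = DownBlock2D  # hypothetical import from diffusers.models
#     block_type = "down"
#
#     def test_output(self):
#         expected_slice = [0.0, ...]  # placeholder regression values
#         super().test_output(expected_slice)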
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_opt'''] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_opt'''] = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_opt'''] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 8 | 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir('''fixtures''')
class ImageProcessorUtilTester(unittest.TestCase):
'''simple docstring'''
def test_cached_files_are_used_when_internet_is_down(self):
# A mock response for an HTTP head request to emulate server down
response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = HTTPError
response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
_ = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=response_mock ) as mock_head:
_ = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# This check we did call the fake head request
mock_head.assert_called()
def test_legacy_load_from_url(self):
# This test is for deprecated behavior and can be removed in v5
_ = ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
def test_image_processor_from_pretrained_subfolder(self):
with self.assertRaises(OSError ):
# config is in subfolder, the following should not work without specifying the subfolder
_ = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
config = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' )
self.assertIsNotNone(config )
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
'''simple docstring'''
@classmethod
def setUpClass(cls):
cls._token = TOKEN
HfFolder.save_token(TOKEN )
@classmethod
def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
def test_push_to_hub(self):
image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token )
new_image_processor = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(v , getattr(new_image_processor , k ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
tmp_dir , repo_id='''test-image-processor''' , push_to_hub=True , use_auth_token=self._token )
new_image_processor = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(v , getattr(new_image_processor , k ) )
def test_push_to_hub_in_organization(self):
image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token )
new_image_processor = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(v , getattr(new_image_processor , k ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
tmp_dir , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=True , use_auth_token=self._token )
new_image_processor = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(v , getattr(new_image_processor , k ) )
def test_push_to_hub_dynamic_image_processor(self):
CustomImageProcessor.register_for_auto_class()
image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
new_image_processor = AutoImageProcessor.from_pretrained(
f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=True )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' ) | 8 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool(PipelineTool):
'''simple docstring'''
default_checkpoint = "philschmid/bart-large-cnn-samsum"
description = (
"This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
"and returns a summary of the text."
)
name = "summarizer"
pre_processor_class = AutoTokenizer
model_class = AutoModelForSeq2SeqLM
inputs = ["text"]
outputs = ["text"]
def encode(self, text):
return self.pre_processor(text , return_tensors='''pt''' , truncation=True )
def forward(self, inputs):
return self.model.generate(**inputs )[0]
def decode(self, outputs):
return self.pre_processor.decode(outputs , skip_special_tokens=True , clean_up_tokenization_spaces=True ) | 8 | 1 |
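# Hedged usage sketch for the tool above (calling convention assumed from
# PipelineTool.__call__):
#
# summarizer = TextSummarizationTool()
# summary = summarizer("A long English passage to condense ...")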
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
Wav2Vec2CTCTokenizer,
Wav2Vec2FeatureExtractor,
Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''post_extract_proj''': '''feature_projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.upsample.0''': '''encoder.upsample.projection''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
for attribute in key.split('''.''' ):
hf_pointer = getattr(hf_pointer , attribute )
if weight_type is not None:
hf_shape = getattr(hf_pointer , weight_type ).shape
else:
hf_shape = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
hf_pointer.weight.data = value
elif weight_type == "weight_g":
hf_pointer.weight_g.data = value
elif weight_type == "weight_v":
hf_pointer.weight_v.data = value
elif weight_type == "bias":
hf_pointer.bias.data = value
else:
hf_pointer.data = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
unused_weights = []
fairseq_dict = fairseq_model.state_dict()
feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
is_used = False
if "conv_layers" in name:
load_conv_layer(
name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
is_used = True
else:
for key, mapped_key in MAPPING.items():
mapped_key = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
is_used = True
if "*" in mapped_key:
layer_index = name.split(key )[0].split('''.''' )[-2]
mapped_key = mapped_key.replace('''*''' , layer_index )
if "weight_g" in name:
weight_type = '''weight_g'''
elif "weight_v" in name:
weight_type = '''weight_v'''
elif "weight" in name:
weight_type = '''weight'''
elif "bias" in name:
weight_type = '''bias'''
else:
weight_type = None
set_recursively(hf_model , mapped_key , value , name , weight_type )
continue
if not is_used:
unused_weights.append(name )
logger.warning(F'''Unused weights: {unused_weights}''' )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = full_name.split('''conv_layers.''' )[-1]
snake_case_ = name.split('''.''' )
snake_case_ = int(items[0] )
snake_case_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
snake_case_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
snake_case_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
snake_case_ = value
logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
)
snake_case_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = SEWConfig()
if is_finetuned:
snake_case_ = model.wav_encoder.wav_model.cfg
else:
snake_case_ = model.cfg
snake_case_ = fs_config.conv_bias
snake_case_ = eval(fs_config.conv_feature_layers )
snake_case_ = [x[0] for x in conv_layers]
snake_case_ = [x[1] for x in conv_layers]
snake_case_ = [x[2] for x in conv_layers]
snake_case_ = '''gelu'''
snake_case_ = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
snake_case_ = 0.0
snake_case_ = fs_config.activation_fn.name
snake_case_ = fs_config.encoder_embed_dim
snake_case_ = 0.02
snake_case_ = fs_config.encoder_ffn_embed_dim
snake_case_ = 1E-5
snake_case_ = fs_config.encoder_layerdrop
snake_case_ = fs_config.encoder_attention_heads
snake_case_ = fs_config.conv_pos_groups
snake_case_ = fs_config.conv_pos
snake_case_ = len(SCREAMING_SNAKE_CASE__ )
snake_case_ = fs_config.encoder_layers
snake_case_ = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
snake_case_ = model.cfg
snake_case_ = fs_config.final_dropout
snake_case_ = fs_config.layerdrop
snake_case_ = fs_config.activation_dropout
snake_case_ = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
snake_case_ = fs_config.attention_dropout
snake_case_ = fs_config.dropout_input
snake_case_ = fs_config.dropout
snake_case_ = fs_config.mask_channel_length
snake_case_ = fs_config.mask_channel_prob
snake_case_ = fs_config.mask_length
snake_case_ = fs_config.mask_prob
snake_case_ = '''Wav2Vec2FeatureExtractor'''
snake_case_ = '''Wav2Vec2CTCTokenizer'''
return config
@torch.no_grad()
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True ):
if is_finetuned:
snake_case_, snake_case_, snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
snake_case_, snake_case_, snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
snake_case_ = SEWConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
else:
snake_case_ = convert_config(model[0] , SCREAMING_SNAKE_CASE__ )
snake_case_ = model[0].eval()
snake_case_ = True if config.feat_extract_norm == '''layer''' else False
snake_case_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , )
if is_finetuned:
if dict_path:
snake_case_ = Dictionary.load(SCREAMING_SNAKE_CASE__ )
# important: change the bos & pad token ids, since the CTC symbol is
# <pad> and not <s> as in fairseq
snake_case_ = target_dict.pad_index
snake_case_ = target_dict.bos_index
snake_case_ = target_dict.pad_index
snake_case_ = target_dict.bos_index
snake_case_ = target_dict.eos_index
snake_case_ = len(target_dict.symbols )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''vocab.json''' )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE__ ) )
return
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , SCREAMING_SNAKE_CASE__ )
snake_case_ = WavaVecaCTCTokenizer(
SCREAMING_SNAKE_CASE__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=SCREAMING_SNAKE_CASE__ , )
snake_case_ = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case_ = SEWForCTC(SCREAMING_SNAKE_CASE__ )
else:
snake_case_ = SEWModel(SCREAMING_SNAKE_CASE__ )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ )
recursively_load_weights(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--is_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
lowerCAmelCase_ = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
) | 8 |
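# Example invocation of the conversion script above (a sketch with hypothetical
# local paths; only the flags come from the argparse definitions):
#
#   python convert_sew_checkpoint.py \
#       --checkpoint_path /path/to/sew_checkpoint.pt \
#       --pytorch_dump_folder_path ./sew-hf \
#       --dict_path /path/to/dict.ltr.txt \
#       --is_finetuned
#
# The script filename and all paths are assumptions.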
from collections import deque
from .hash_table import HashTable
class snake_case_ ( __A ):
'''simple docstring'''
def __init__( self : int , *_UpperCamelCase : int , **_UpperCamelCase : Tuple ) ->Tuple:
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
def snake_case__( self : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Dict ) ->Tuple:
snake_case_ = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(_UpperCamelCase )
snake_case_ = self.values[key]
def snake_case__( self : List[Any] ) ->str:
return (
sum(self.charge_factor - len(_UpperCamelCase ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def snake_case__( self : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int]=None ) ->str:
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(_UpperCamelCase ) == 0
):
return key
return super()._collision_resolution(_UpperCamelCase , _UpperCamelCase ) | 8 | 1 |
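# A minimal, de-obfuscated sketch of the separate-chaining idea above, assuming
# a fixed-size table; this is an illustration, not the HashTable base class the
# original subclasses.
from collections import deque

class ChainedHashTable:
    def __init__(self, size: int = 8) -> None:
        self.size = size
        self.buckets = [deque() for _ in range(size)]

    def insert(self, key, value) -> None:
        # chain collisions by prepending to the bucket's deque
        self.buckets[hash(key) % self.size].appendleft((key, value))

    def lookup(self, key):
        for k, v in self.buckets[hash(key) % self.size]:
            if k == key:
                return v
        raise KeyError(key)

# table = ChainedHashTable(); table.insert("a", 1); table.lookup("a")  # -> 1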
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL | 8 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = len(SCREAMING_SNAKE_CASE__ )
# We need to create a solution grid to record the path.
snake_case_ = [[0 for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )]
snake_case_ = run_maze(SCREAMING_SNAKE_CASE__ , 0 , 0 , SCREAMING_SNAKE_CASE__ )
if solved:
print('''\n'''.join(str(SCREAMING_SNAKE_CASE__ ) for row in solutions ) )
else:
print('''No solution exists!''' )
return solved
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = len(SCREAMING_SNAKE_CASE__ )
# Final check: did we reach the bottom-right corner?
if i == j == (size - 1):
snake_case_ = 1
return True
snake_case_ = (not i < 0) and (not j < 0) # Check lower bounds
snake_case_ = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# Check for already-visited and blocked points.
snake_case_ = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# mark the current cell as visited
snake_case_ = 1
# check for directions
if (
run_maze(SCREAMING_SNAKE_CASE__ , i + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
or run_maze(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , j + 1 , SCREAMING_SNAKE_CASE__ )
or run_maze(SCREAMING_SNAKE_CASE__ , i - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
or run_maze(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , j - 1 , SCREAMING_SNAKE_CASE__ )
):
return True
snake_case_ = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
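# Illustrative input for the backtracking solver above (a sketch: 0 = open
# cell, 1 = wall; the rat walks from the top-left to the bottom-right). With
# the original, un-mangled signature solve_maze(maze) this would print a 0/1
# grid marking the discovered path:
#
#   maze = [
#       [0, 1, 0],
#       [0, 1, 0],
#       [0, 0, 0],
#   ]
#   solve_maze(maze)  # path hugs the left column, then the bottom row
#
# The function name is an assumption; the mangled names above prevent calling
# it directly.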
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = "switch_transformers"
SCREAMING_SNAKE_CASE : Tuple = ["past_key_values"]
SCREAMING_SNAKE_CASE : int = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self : Tuple , _UpperCamelCase : Optional[int]=3_2_1_2_8 , _UpperCamelCase : Any=7_6_8 , _UpperCamelCase : Optional[Any]=6_4 , _UpperCamelCase : List[Any]=2_0_4_8 , _UpperCamelCase : Union[str, Any]=6_4 , _UpperCamelCase : Union[str, Any]=1_2 , _UpperCamelCase : List[Any]=3 , _UpperCamelCase : str=1_2 , _UpperCamelCase : Union[str, Any]=3 , _UpperCamelCase : Tuple=1_2 , _UpperCamelCase : Dict=8 , _UpperCamelCase : Any=False , _UpperCamelCase : Dict=0.01 , _UpperCamelCase : Optional[Any]="float32" , _UpperCamelCase : Optional[int]=False , _UpperCamelCase : List[str]=3_2 , _UpperCamelCase : str=1_2_8 , _UpperCamelCase : Tuple=0.1 , _UpperCamelCase : List[str]=1e-6 , _UpperCamelCase : Optional[int]=0.001 , _UpperCamelCase : Optional[int]=0.001 , _UpperCamelCase : Any=1.0 , _UpperCamelCase : Optional[int]="relu" , _UpperCamelCase : Dict=True , _UpperCamelCase : Optional[int]=False , _UpperCamelCase : Dict=True , _UpperCamelCase : Tuple=0 , _UpperCamelCase : List[Any]=1 , **_UpperCamelCase : Tuple , ) ->str:
snake_case_ = vocab_size
snake_case_ = d_model
snake_case_ = d_kv
snake_case_ = d_ff
snake_case_ = num_sparse_encoder_layers
snake_case_ = num_layers
snake_case_ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
snake_case_ = num_sparse_decoder_layers
# This tells us after how many encoder layers we have to insert a sparse layer.
if self.num_sparse_encoder_layers > 0:
snake_case_ = self.num_layers // self.num_sparse_encoder_layers
else:
snake_case_ = self.num_layers # HACK: this will create 0 sparse layers
# This tells us after how many decoder layers we have to insert a sparse layer.
if self.num_sparse_decoder_layers > 0:
snake_case_ = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
snake_case_ = self.num_decoder_layers # HACK: this will create 0 sparse layers
snake_case_ = num_heads
snake_case_ = num_experts
snake_case_ = expert_capacity
snake_case_ = router_bias
snake_case_ = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
snake_case_ = router_dtype
snake_case_ = router_ignore_padding_tokens
snake_case_ = relative_attention_num_buckets
snake_case_ = relative_attention_max_distance
snake_case_ = dropout_rate
snake_case_ = layer_norm_epsilon
snake_case_ = initializer_factor
snake_case_ = feed_forward_proj
snake_case_ = use_cache
snake_case_ = add_router_probs
snake_case_ = router_z_loss_coef
snake_case_ = router_aux_loss_coef
snake_case_ = self.feed_forward_proj.split('''-''' )
snake_case_ = act_info[-1]
snake_case_ = act_info[0] == '''gated'''
if len(_UpperCamelCase ) > 1 and act_info[0] != "gated" or len(_UpperCamelCase ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
snake_case_ = '''gelu_new'''
super().__init__(
pad_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , is_encoder_decoder=_UpperCamelCase , **_UpperCamelCase , ) | 8 |
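# Hedged usage sketch: the un-mangled class is exposed by transformers as
# SwitchTransformersConfig, so a small config can be built like this (the
# values are illustrative, not tuned):
from transformers import SwitchTransformersConfig

switch_config = SwitchTransformersConfig(num_experts=8, expert_capacity=64)
assert switch_config.router_dtype == "float32"  # default per the signature above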
from decimal import Decimal, getcontext
from math import ceil, factorial
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
raise TypeError('''Undefined for non-integers''' )
elif precision < 1:
raise ValueError('''Undefined for non-natural numbers''' )
snake_case_ = precision
snake_case_ = ceil(precision / 14 )
snake_case_ = 426880 * Decimal(10005 ).sqrt()
snake_case_ = 1
snake_case_ = 13591409
snake_case_ = Decimal(SCREAMING_SNAKE_CASE__ )
for k in range(1 , SCREAMING_SNAKE_CASE__ ):
snake_case_ = factorial(6 * k ) // (factorial(3 * k ) * factorial(SCREAMING_SNAKE_CASE__ ) ** 3)
linear_term += 545140134
exponential_term *= -262537412640768000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
lowerCAmelCase_ = 50
print(f"""The first {n} digits of pi is: {pi(n)}""") | 8 | 1 |
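# De-obfuscated reference sketch of the routine above (the mangled local names
# make the original non-runnable). It implements the Chudnovsky series
#   pi = 426880*sqrt(10005) / sum_k [ (6k)! (13591409 + 545140134 k)
#        / ((3k)! (k!)^3 (-262537412640768000)^k) ]
from decimal import Decimal, getcontext
from math import ceil, factorial

def chudnovsky_pi(precision: int) -> str:
    getcontext().prec = precision
    iterations = ceil(precision / 14)  # each term contributes ~14 digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]

# chudnovsky_pi(50)[:10] -> '3.14159265'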
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]),
({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(SCREAMING_SNAKE_CASE__ , i + 1 ) for i in range(10 )]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = _distribute_shards(**SCREAMING_SNAKE_CASE__ )
assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = _split_gen_kwargs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if expected is RuntimeError:
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
_number_of_shards_in_gen_kwargs(SCREAMING_SNAKE_CASE__ )
else:
snake_case_ = _number_of_shards_in_gen_kwargs(SCREAMING_SNAKE_CASE__ )
assert out == expected | 8 |
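# The helpers exercised above are private to `datasets`. An illustrative
# implementation consistent with the parametrized expectations (an assumption,
# not the library's actual source) splits shards into contiguous, near-equal
# ranges and drops empty jobs:
def distribute_shards_sketch(num_shards: int, max_num_jobs: int) -> list:
    base, extra = divmod(num_shards, max_num_jobs)
    ranges, start = [], 0
    for i in range(max_num_jobs):
        size = base + (1 if i < extra else 0)
        if size:
            ranges.append(range(start, start + size))
        start += size
    return ranges

assert distribute_shards_sketch(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]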
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class snake_case_ ( __A ):
'''simple docstring'''
def __init__( self : int , _UpperCamelCase : pyspark.sql.DataFrame , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : bool = True , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : str = None , _UpperCamelCase : bool = True , _UpperCamelCase : str = "arrow" , **_UpperCamelCase : Tuple , ) ->str:
super().__init__(
split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , **_UpperCamelCase , )
snake_case_ = load_from_cache_file
snake_case_ = file_format
snake_case_ = Spark(
df=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , working_dir=_UpperCamelCase , **_UpperCamelCase , )
def snake_case__( self : int ) ->Tuple:
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
snake_case_ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=_UpperCamelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split ) | 8 | 1 |
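# Hedged usage note: in recent `datasets` releases the public entry points that
# wrap this reader are assumed to be Dataset.from_spark / IterableDataset.from_spark:
#
#   from datasets import Dataset, IterableDataset
#   ds = Dataset.from_spark(spark_df)            # materialized Arrow route
#   ids = IterableDataset.from_spark(spark_df)   # streaming route
#
# spark_df is a pyspark.sql.DataFrame; both names come from the library docs,
# not from the mangled code above.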
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 8 |
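# A minimal sketch of the lazy-import idea behind _LazyModule, using PEP 562's
# module-level __getattr__. This is an illustration for a package __init__.py,
# not the transformers implementation:
import importlib

_lazy_names = {"OPTModel": ".modeling_opt", "OPTConfig": ".configuration_opt"}

def __getattr__(name):
    if name in _lazy_names:
        # resolve the submodule only on first attribute access
        module = importlib.import_module(_lazy_names[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")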
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
lowerCAmelCase_ = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''DPTFeatureExtractor''']
lowerCAmelCase_ = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 8 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = "biogpt"
def __init__( self : Any , _UpperCamelCase : Dict=4_2_3_8_4 , _UpperCamelCase : Dict=1_0_2_4 , _UpperCamelCase : Optional[Any]=2_4 , _UpperCamelCase : Union[str, Any]=1_6 , _UpperCamelCase : Union[str, Any]=4_0_9_6 , _UpperCamelCase : Union[str, Any]="gelu" , _UpperCamelCase : Tuple=0.1 , _UpperCamelCase : int=0.1 , _UpperCamelCase : Union[str, Any]=1_0_2_4 , _UpperCamelCase : Optional[int]=0.02 , _UpperCamelCase : str=1e-12 , _UpperCamelCase : List[str]=True , _UpperCamelCase : str=True , _UpperCamelCase : List[str]=0.0 , _UpperCamelCase : Optional[Any]=0.0 , _UpperCamelCase : int=1 , _UpperCamelCase : List[Any]=0 , _UpperCamelCase : Any=2 , **_UpperCamelCase : List[str] , ) ->List[Any]:
snake_case_ = vocab_size
snake_case_ = max_position_embeddings
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = scale_embedding
snake_case_ = use_cache
snake_case_ = layerdrop
snake_case_ = activation_dropout
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) | 8 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase_ = {
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase_ = {
'''unc-nlp/lxmert-base-uncased''': 5_12,
}
lowerCAmelCase_ = {
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Any = LxmertTokenizer
def __init__( self : Union[str, Any] , _UpperCamelCase : int=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Dict=True , _UpperCamelCase : Any="[UNK]" , _UpperCamelCase : Tuple="[SEP]" , _UpperCamelCase : List[Any]="[PAD]" , _UpperCamelCase : Union[str, Any]="[CLS]" , _UpperCamelCase : str="[MASK]" , _UpperCamelCase : List[str]=True , _UpperCamelCase : List[str]=None , **_UpperCamelCase : List[str] , ) ->Any:
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , )
snake_case_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _UpperCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _UpperCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _UpperCamelCase ) != tokenize_chinese_chars
):
snake_case_ = getattr(_UpperCamelCase , normalizer_state.pop('''type''' ) )
snake_case_ = do_lower_case
snake_case_ = strip_accents
snake_case_ = tokenize_chinese_chars
snake_case_ = normalizer_class(**_UpperCamelCase )
snake_case_ = do_lower_case
def snake_case__( self : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str]=None ) ->List[Any]:
snake_case_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case__( self : int , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__( self : Any , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
snake_case_ = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase ) | 8 | 1 |
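# Hedged usage sketch via the public API (this fast tokenizer is what
# AutoTokenizer resolves to for LXMERT checkpoints):
from transformers import AutoTokenizer

lxmert_tok = AutoTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
enc = lxmert_tok("a photo of a cat", "is the cat sleeping?")
# enc["token_type_ids"] marks the first segment with 0s and the second with 1s,
# matching create_token_type_ids_from_sequences above.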
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = "char"
SCREAMING_SNAKE_CASE : Optional[Any] = "bpe"
SCREAMING_SNAKE_CASE : List[str] = "wp"
lowerCAmelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ["image_processor", "char_tokenizer"]
SCREAMING_SNAKE_CASE : str = "ViTImageProcessor"
SCREAMING_SNAKE_CASE : Any = "MgpstrTokenizer"
def __init__( self : Tuple , _UpperCamelCase : int=None , _UpperCamelCase : List[str]=None , **_UpperCamelCase : str ) ->Union[str, Any]:
snake_case_ = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _UpperCamelCase , )
snake_case_ = kwargs.pop('''feature_extractor''' )
snake_case_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
snake_case_ = tokenizer
snake_case_ = AutoTokenizer.from_pretrained('''gpt2''' )
snake_case_ = AutoTokenizer.from_pretrained('''bert-base-uncased''' )
super().__init__(_UpperCamelCase , _UpperCamelCase )
def __call__( self : Optional[Any] , _UpperCamelCase : List[Any]=None , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Union[str, Any]=None , **_UpperCamelCase : Any ) ->str:
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
snake_case_ = self.image_processor(_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase )
if text is not None:
snake_case_ = self.char_tokenizer(_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
snake_case_ = encodings['''input_ids''']
return inputs
def snake_case__( self : Optional[Any] , _UpperCamelCase : Any ) ->Optional[Any]:
snake_case_, snake_case_, snake_case_ = sequences
snake_case_ = char_preds.size(0 )
snake_case_, snake_case_ = self._decode_helper(_UpperCamelCase , '''char''' )
snake_case_, snake_case_ = self._decode_helper(_UpperCamelCase , '''bpe''' )
snake_case_, snake_case_ = self._decode_helper(_UpperCamelCase , '''wp''' )
snake_case_ = []
snake_case_ = []
for i in range(_UpperCamelCase ):
snake_case_ = [char_scores[i], bpe_scores[i], wp_scores[i]]
snake_case_ = [char_strs[i], bpe_strs[i], wp_strs[i]]
snake_case_ = scores.index(max(_UpperCamelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
snake_case_ = {}
snake_case_ = final_strs
snake_case_ = final_scores
snake_case_ = char_strs
snake_case_ = bpe_strs
snake_case_ = wp_strs
return out
def snake_case__( self : Optional[int] , _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] ) ->str:
if format == DecodeType.CHARACTER:
snake_case_ = self.char_decode
snake_case_ = 1
snake_case_ = '''[s]'''
elif format == DecodeType.BPE:
snake_case_ = self.bpe_decode
snake_case_ = 2
snake_case_ = '''#'''
elif format == DecodeType.WORDPIECE:
snake_case_ = self.wp_decode
snake_case_ = 1_0_2
snake_case_ = '''[SEP]'''
else:
raise ValueError(f'''Format {format} is not supported.''' )
snake_case_, snake_case_ = [], []
snake_case_ = pred_logits.size(0 )
snake_case_ = pred_logits.size(1 )
snake_case_, snake_case_ = pred_logits.topk(1 , dim=-1 , largest=_UpperCamelCase , sorted=_UpperCamelCase )
snake_case_ = preds_index.view(-1 , _UpperCamelCase )[:, 1:]
snake_case_ = decoder(_UpperCamelCase )
snake_case_, snake_case_ = torch.nn.functional.softmax(_UpperCamelCase , dim=2 ).max(dim=2 )
snake_case_ = preds_max_prob[:, 1:]
for index in range(_UpperCamelCase ):
snake_case_ = preds_str[index].find(_UpperCamelCase )
snake_case_ = preds_str[index][:pred_eos]
snake_case_ = preds_index[index].cpu().tolist()
snake_case_ = pred_index.index(_UpperCamelCase ) if eos_token in pred_index else -1
snake_case_ = preds_max_prob[index][: pred_eos_index + 1]
snake_case_ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(_UpperCamelCase )
conf_scores.append(_UpperCamelCase )
return dec_strs, conf_scores
def snake_case__( self : int , _UpperCamelCase : List[str] ) ->Any:
snake_case_ = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(_UpperCamelCase )]
return decode_strs
def snake_case__( self : Optional[int] , _UpperCamelCase : List[Any] ) ->List[str]:
return self.bpe_tokenizer.batch_decode(_UpperCamelCase )
def snake_case__( self : Tuple , _UpperCamelCase : Union[str, Any] ) ->Dict:
snake_case_ = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(_UpperCamelCase )]
return decode_strs | 8 |
import math
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = 10001 ):
try:
snake_case_ = int(SCREAMING_SNAKE_CASE__ )
except (TypeError, ValueError):
raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
if nth <= 0:
raise ValueError('''Parameter nth must be greater than or equal to one.''' )
snake_case_ = []
snake_case_ = 2
while len(SCREAMING_SNAKE_CASE__ ) < nth:
if is_prime(SCREAMING_SNAKE_CASE__ ):
primes.append(SCREAMING_SNAKE_CASE__ )
num += 1
else:
num += 1
return primes[len(SCREAMING_SNAKE_CASE__ ) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""") | 8 | 1 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class snake_case_ ( __A ):
'''simple docstring'''
def snake_case__( self : int , _UpperCamelCase : float ) ->float:
return 0.0
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
snake_case_ = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = 512
snake_case_ = [1] + [0] * (size - 1)
snake_case_ = [filter_type.process(SCREAMING_SNAKE_CASE__ ) for item in inputs]
snake_case_ = [0] * (samplerate - size) # zero-padding
outputs += filler
snake_case_ = np.abs(np.fft.fft(SCREAMING_SNAKE_CASE__ ) )
snake_case_ = 20 * np.logaa(SCREAMING_SNAKE_CASE__ )
# Frequencies on a log scale from 24 Hz to the Nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('''Frequency (Hz)''' )
plt.xscale('''log''' )
# Display within reasonable bounds
snake_case_ = get_bounds(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel('''Gain (dB)''' )
plt.plot(SCREAMING_SNAKE_CASE__ )
plt.show()
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = 512
snake_case_ = [1] + [0] * (size - 1)
snake_case_ = [filter_type.process(SCREAMING_SNAKE_CASE__ ) for item in inputs]
snake_case_ = [0] * (samplerate - size) # zero-padding
outputs += filler
snake_case_ = np.angle(np.fft.fft(SCREAMING_SNAKE_CASE__ ) )
# Frequencies on a log scale from 24 Hz to the Nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('''Frequency (Hz)''' )
plt.xscale('''log''' )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel('''Phase shift (Radians)''' )
plt.plot(np.unwrap(SCREAMING_SNAKE_CASE__ , -2 * pi ) )
plt.show() | 8 |
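# The two plotting helpers above accept any object satisfying the Protocol's
# process(sample) -> float. A trivial pass-through filter (an illustrative
# assumption, since the mangled names hide the real call sites) yields a flat
# 0 dB magnitude response and zero phase shift:
class IdentityFilter:
    def process(self, sample: float) -> float:
        return sample

# e.g. show_frequency_response(IdentityFilter(), 48000) in the un-mangled code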
from sklearn.metrics import mean_squared_error
import datasets
lowerCAmelCase_ = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
lowerCAmelCase_ = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
lowerCAmelCase_ = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
'''simple docstring'''
def snake_case__( self : Optional[int] ) ->List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
def snake_case__( self : List[Any] ) ->Optional[int]:
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
def snake_case__( self : int , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any=None , _UpperCamelCase : Optional[int]="uniform_average" , _UpperCamelCase : Tuple=True ) ->Tuple:
snake_case_ = mean_squared_error(
_UpperCamelCase , _UpperCamelCase , sample_weight=_UpperCamelCase , multioutput=_UpperCamelCase , squared=_UpperCamelCase )
return {"mse": mse} | 8 | 1 |
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = SwinConfig()
snake_case_ = swin_name.split('''_''' )
snake_case_ = name_split[1]
snake_case_ = int(name_split[4] )
snake_case_ = int(name_split[3][-1] )
if model_size == "tiny":
snake_case_ = 96
snake_case_ = (2, 2, 6, 2)
snake_case_ = (3, 6, 12, 24)
elif model_size == "small":
snake_case_ = 96
snake_case_ = (2, 2, 18, 2)
snake_case_ = (3, 6, 12, 24)
elif model_size == "base":
snake_case_ = 128
snake_case_ = (2, 2, 18, 2)
snake_case_ = (4, 8, 16, 32)
else:
snake_case_ = 192
snake_case_ = (2, 2, 18, 2)
snake_case_ = (6, 12, 24, 48)
if "in22k" in swin_name:
snake_case_ = 21841
else:
snake_case_ = 1000
snake_case_ = '''huggingface/label-files'''
snake_case_ = '''imagenet-1k-id2label.json'''
snake_case_ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) , '''r''' ) )
snake_case_ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
snake_case_ = img_size
snake_case_ = num_classes
snake_case_ = embed_dim
snake_case_ = depths
snake_case_ = num_heads
snake_case_ = window_size
return config
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if "patch_embed.proj" in name:
snake_case_ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
snake_case_ = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
snake_case_ = '''encoder.''' + name
if "attn.proj" in name:
snake_case_ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
snake_case_ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
snake_case_ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
snake_case_ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
snake_case_ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
snake_case_ = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "norm.weight":
snake_case_ = '''layernorm.weight'''
if name == "norm.bias":
snake_case_ = '''layernorm.bias'''
if "head" in name:
snake_case_ = name.replace('''head''' , '''classifier''' )
else:
snake_case_ = '''swin.''' + name
return name
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
for key in orig_state_dict.copy().keys():
snake_case_ = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "mask" in key:
continue
elif "qkv" in key:
snake_case_ = key.split('''.''' )
snake_case_ = int(key_split[1] )
snake_case_ = int(key_split[3] )
snake_case_ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[
dim : dim * 2, :
]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[
:dim
]
snake_case_ = val[
dim : dim * 2
]
snake_case_ = val[
-dim:
]
else:
snake_case_ = val
return orig_state_dict
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = timm.create_model(SCREAMING_SNAKE_CASE__ , pretrained=SCREAMING_SNAKE_CASE__ )
timm_model.eval()
snake_case_ = get_swin_config(SCREAMING_SNAKE_CASE__ )
snake_case_ = SwinForImageClassification(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case_ = convert_state_dict(timm_model.state_dict() , SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
snake_case_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case_ = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swin_name.replace('''_''' , '''-''' ) ) )
snake_case_ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
snake_case_ = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' )
snake_case_ = timm_model(inputs['''pixel_values'''] )
snake_case_ = model(**SCREAMING_SNAKE_CASE__ ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 )
print(F'''Saving model {swin_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCAmelCase_ = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path) | 8 |
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = []
if len(SCREAMING_SNAKE_CASE__ ) == 1:
return [nums.copy()]
for _ in range(len(SCREAMING_SNAKE_CASE__ ) ):
snake_case_ = nums.pop(0 )
snake_case_ = permute(SCREAMING_SNAKE_CASE__ )
for perm in permutations:
perm.append(SCREAMING_SNAKE_CASE__ )
result.extend(SCREAMING_SNAKE_CASE__ )
nums.append(SCREAMING_SNAKE_CASE__ )
return result
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
def backtrack(SCREAMING_SNAKE_CASE__ ):
if start == len(SCREAMING_SNAKE_CASE__ ) - 1:
output.append(nums[:] )
else:
for i in range(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ):
snake_case_, snake_case_ = nums[i], nums[start]
backtrack(start + 1 )
snake_case_, snake_case_ = nums[i], nums[start] # backtrack
snake_case_ = []
backtrack(0 )
return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
lowerCAmelCase_ = permutea([1, 2, 3])
print(res)
doctest.testmod() | 8 | 1 |
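# For cross-checking the two implementations above, the standard library yields
# the same six orderings of a 3-element list (result order may differ from the
# backtracking variant):
from itertools import permutations
print([list(p) for p in permutations([1, 2, 3])])
# [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]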
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class snake_case_ ( __A , __A , __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionLatentUpscalePipeline
SCREAMING_SNAKE_CASE : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
SCREAMING_SNAKE_CASE : int = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE : Optional[int] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE : str = frozenset([] )
SCREAMING_SNAKE_CASE : List[Any] = True
@property
def snake_case__( self : str ) ->int:
snake_case_ = 1
snake_case_ = 4
snake_case_ = (1_6, 1_6)
snake_case_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_UpperCamelCase )
return image
def snake_case__( self : Optional[Any] ) ->Dict:
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=_UpperCamelCase , block_out_channels=[3_2, 3_2, 6_4, 6_4] , time_cond_proj_dim=1_6_0 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=3_2 , down_block_types=(
'''KDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
) , in_channels=8 , mid_block_type=_UpperCamelCase , only_cross_attention=_UpperCamelCase , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
snake_case_ = AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4, 6_4] , in_channels=3 , out_channels=3 , down_block_types=[
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
snake_case_ = EulerDiscreteScheduler(prediction_type='''sample''' )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''quick_gelu''' , projection_dim=5_1_2 , )
snake_case_ = CLIPTextModel(_UpperCamelCase )
snake_case_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case_ = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def snake_case__( self : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple=0 ) ->Optional[Any]:
if str(_UpperCamelCase ).startswith('''mps''' ):
snake_case_ = torch.manual_seed(_UpperCamelCase )
else:
snake_case_ = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
snake_case_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__( self : Dict ) ->int:
snake_case_ = '''cpu'''
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_dummy_inputs(_UpperCamelCase )
snake_case_ = pipe(**_UpperCamelCase ).images
snake_case_ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_5_6, 2_5_6, 3) )
snake_case_ = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
snake_case_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCamelCase , 1e-3 )
def snake_case__( self : Optional[Any] ) ->Tuple:
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def snake_case__( self : Union[str, Any] ) ->Tuple:
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def snake_case__( self : Optional[Any] ) ->Tuple:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def snake_case__( self : Any ) ->Dict:
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def snake_case__( self : List[str] ) ->List[Any]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def snake_case__( self : Any ) ->List[str]:
super().test_save_load_local(expected_max_difference=3e-3 )
def snake_case__( self : List[str] ) ->List[str]:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def snake_case__( self : str ) ->Optional[Any]:
snake_case_ = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**_UpperCamelCase )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_dummy_inputs(_UpperCamelCase )
snake_case_ = 2
snake_case_ = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# schedulers in the skip list don't support the sigma schedule used here
continue
snake_case_ = getattr(_UpperCamelCase , scheduler_enum.name )
snake_case_ = scheduler_cls.from_config(pipe.scheduler.config )
snake_case_ = pipe(**_UpperCamelCase )[0]
outputs.append(_UpperCamelCase )
assert check_same_shape(_UpperCamelCase )
@require_torch_gpu
@slow
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Any ) ->int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__( self : List[Any] ) ->Any:
snake_case_ = torch.manual_seed(3_3 )
snake_case_ = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
snake_case_ = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
snake_case_ = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
snake_case_ = pipe(_UpperCamelCase , generator=_UpperCamelCase , output_type='''latent''' ).images
snake_case_ = upscaler(
prompt=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=2_0 , guidance_scale=0 , generator=_UpperCamelCase , output_type='''np''' , ).images[0]
snake_case_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
assert np.abs((expected_image - image).mean() ) < 5e-2
def snake_case__( self : Optional[int] ) ->Optional[Any]:
snake_case_ = torch.manual_seed(3_3 )
snake_case_ = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
snake_case_ = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
snake_case_ = upscaler(
prompt=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=2_0 , guidance_scale=0 , generator=_UpperCamelCase , output_type='''np''' , ).images[0]
snake_case_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
assert np.abs((expected_image - image).max() ) < 5e-2 | 8 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
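# Illustrative note (added; not part of the original module): with this
# pattern, `import transformers.models.xglm` stays cheap. _LazyModule replaces
# the module object in sys.modules and only imports a submodule such as
# modeling_xglm the first time a name listed in _import_structure
# (e.g. XGLMForCausalLM) is actually accessed, so the heavy torch/tf/flax
# imports are deferred until they are needed.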
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem by locking this file"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
    raise
from ..utils import DummyObject, requires_backends


# Dummy object so importing this class fails with a clear "install note_seq"
# message instead of an ImportError. The class name is reconstructed from
# diffusers' dummy_note_seq_objects module.
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
from statistics import mean

import numpy as np


def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the turn around time of each process under Highest Response Ratio Next."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the waiting time of each process."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )

    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
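# Worked example for the two helpers above (hand-checked, hypothetical data):
# with arrival times [0, 0] and burst times [2, 3], both response ratios start
# at 1.0, so the 2-unit job is served first; the 3-unit job then waits 2 units
# and its ratio grows to (3 + 2) / 3. calculate_turn_around_time(...) returns
# [2, 5] and calculate_waiting_time(...) returns [0, 2], i.e. the ratio
# (burst + waiting) / burst drives every scheduling decision above.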
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
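# Hand-traced example for the function above:
# longest_subsequence([5, 1, 3, 2, 4]) returns [1, 2, 4] -- the pivot-and-
# recurse strategy compares the best subsequence that skips the first element
# against the best one that keeps it, and [1, 2, 4] wins with length 3.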
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
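# Hand-checked example: resonant_frequency(inductance=10, capacitance=5)
# returns ("Resonant frequency", 1 / (2 * pi * sqrt(50))) ~= 0.0225 Hz,
# i.e. the standard LC formula f = 1 / (2 * pi * sqrt(L * C)).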
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
    # NOTE: method names below are reconstructions; unittest only collects
    # methods whose names start with "test_".
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
    @require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
    @require_multi_gpu
    def test_distributed_data_parallel_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
        raise ValueError(error_msg)
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
    # NOTE: method names below are reconstructions; unittest only collects
    # methods whose names start with "test_".
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_slicing(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
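# Note (added for clarity; not in the original script): ipex.optimize with
# dtype=torch.bfloat16 assumes a CPU with native bf16 support (e.g. AVX-512
# BF16 or AMX). On older CPUs it is usually safer to drop the dtype argument
# and run the pipeline in float32.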
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image
    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params
    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[1_7, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_0_0_0, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        # method name reconstructed; unittest requires a test_* prefix
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[1_7, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_0_0_0, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        # method name reconstructed; unittest requires a test_* prefix
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_convnext''': ['''CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvNextConfig''', '''ConvNextOnnxConfig''']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_convnext'''] = ['''ConvNextFeatureExtractor''']
    _import_structure['''image_processing_convnext'''] = ['''ConvNextImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_convnext'''] = [
'''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvNextForImageClassification''',
'''ConvNextModel''',
'''ConvNextPreTrainedModel''',
'''ConvNextBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_convnext'''] = [
'''TFConvNextForImageClassification''',
'''TFConvNextModel''',
'''TFConvNextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def release_memory(*objects):
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
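# Minimal usage sketch for the decorator above (the training function and its
# arguments are hypothetical; only find_executable_batch_size comes from this
# module):
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size, model, dataloader):
#         ...  # batch_size is halved automatically on every CUDA OOM
#
#     train(model, dataloader)  # batch_size is injected by the decorator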
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_whisper_fast'''] = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_whisper'''] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_whisper'''] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_whisper'''] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from __future__ import annotations


def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
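# Quick hand-checked example for the helpers above: encode("abc") == [1, 2, 3]
# and decode([1, 2, 3]) == "abc" -- each lowercase letter maps to its 1-based
# position in the alphabet via ord(letter) - 96.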
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="pt" ):
snake_case_ = {'''add_prefix_space''': True} if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and not line.startswith(''' ''' ) else {}
snake_case_ = padding_side
return tokenizer(
[line] , max_length=SCREAMING_SNAKE_CASE__ , padding='''max_length''' if pad_to_max_length else None , truncation=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , ):
snake_case_ = input_ids.ne(SCREAMING_SNAKE_CASE__ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class snake_case_ ( __A ):
'''simple docstring'''
def __init__( self : List[Any] , _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any="train" , _UpperCamelCase : str=None , _UpperCamelCase : Any=None , _UpperCamelCase : Dict=None , _UpperCamelCase : Tuple="" , ) ->Optional[Any]:
super().__init__()
snake_case_ = Path(_UpperCamelCase ).joinpath(type_path + '''.source''' )
snake_case_ = Path(_UpperCamelCase ).joinpath(type_path + '''.target''' )
snake_case_ = self.get_char_lens(self.src_file )
snake_case_ = max_source_length
snake_case_ = max_target_length
assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
snake_case_ = tokenizer
snake_case_ = prefix
if n_obs is not None:
snake_case_ = self.src_lens[:n_obs]
snake_case_ = src_lang
snake_case_ = tgt_lang
def __len__( self : List[Any] ) ->str:
return len(self.src_lens )
def __getitem__( self : List[str] , _UpperCamelCase : Optional[Any] ) ->Dict[str, torch.Tensor]:
snake_case_ = index + 1 # linecache starts at 1
snake_case_ = self.prefix + linecache.getline(str(self.src_file ) , _UpperCamelCase ).rstrip('''\n''' )
snake_case_ = linecache.getline(str(self.tgt_file ) , _UpperCamelCase ).rstrip('''\n''' )
assert source_line, f'''empty source line for index {index}'''
assert tgt_line, f'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer , _UpperCamelCase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
snake_case_ = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , _UpperCamelCase ) else self.tokenizer
)
snake_case_ = self.tokenizer.generator if isinstance(self.tokenizer , _UpperCamelCase ) else self.tokenizer
snake_case_ = encode_line(_UpperCamelCase , _UpperCamelCase , self.max_source_length , '''right''' )
snake_case_ = encode_line(_UpperCamelCase , _UpperCamelCase , self.max_target_length , '''right''' )
snake_case_ = source_inputs['''input_ids'''].squeeze()
snake_case_ = target_inputs['''input_ids'''].squeeze()
snake_case_ = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def snake_case__( _UpperCamelCase : str ) ->Optional[Any]:
return [len(_UpperCamelCase ) for x in Path(_UpperCamelCase ).open().readlines()]
def snake_case__( self : List[Any] , _UpperCamelCase : List[str] ) ->Dict[str, torch.Tensor]:
snake_case_ = torch.stack([x['''input_ids'''] for x in batch] )
snake_case_ = torch.stack([x['''attention_mask'''] for x in batch] )
snake_case_ = torch.stack([x['''decoder_input_ids'''] for x in batch] )
snake_case_ = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , _UpperCamelCase )
else self.tokenizer.pad_token_id
)
snake_case_ = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , _UpperCamelCase )
else self.tokenizer.pad_token_id
)
snake_case_ = trim_batch(_UpperCamelCase , _UpperCamelCase )
snake_case_, snake_case_ = trim_batch(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase )
snake_case_ = {
'''input_ids''': source_ids,
'''attention_mask''': source_mask,
'''decoder_input_ids''': y,
}
return batch
lowerCAmelCase_ = getLogger(__name__)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
return list(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE__ ) )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = get_git_info()
save_json(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , '''git_log.json''' ) )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=4 , **SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ , '''w''' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , indent=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ ) as f:
return json.load(SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE ():
snake_case_ = git.Repo(search_parent_directories=SCREAMING_SNAKE_CASE__ )
snake_case_ = {
'''repo_id''': str(SCREAMING_SNAKE_CASE__ ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
'''hostname''': str(socket.gethostname() ),
}
return repo_infos
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return list(map(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ , '''wb''' ) as f:
return pickle.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
def remove_articles(SCREAMING_SNAKE_CASE__ ):
return re.sub(R'''\b(a|an|the)\b''' , ''' ''' , SCREAMING_SNAKE_CASE__ )
def white_space_fix(SCREAMING_SNAKE_CASE__ ):
return " ".join(text.split() )
def remove_punc(SCREAMING_SNAKE_CASE__ ):
snake_case_ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(SCREAMING_SNAKE_CASE__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(SCREAMING_SNAKE_CASE__ ) ) ) )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = normalize_answer(SCREAMING_SNAKE_CASE__ ).split()
snake_case_ = normalize_answer(SCREAMING_SNAKE_CASE__ ).split()
snake_case_ = Counter(SCREAMING_SNAKE_CASE__ ) & Counter(SCREAMING_SNAKE_CASE__ )
snake_case_ = sum(common.values() )
if num_same == 0:
return 0
snake_case_ = 1.0 * num_same / len(SCREAMING_SNAKE_CASE__ )
snake_case_ = 1.0 * num_same / len(SCREAMING_SNAKE_CASE__ )
snake_case_ = (2 * precision * recall) / (precision + recall)
return fa
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return normalize_answer(SCREAMING_SNAKE_CASE__ ) == normalize_answer(SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )
snake_case_ = 0
for hypo, pred in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
em += exact_match_score(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
em /= len(SCREAMING_SNAKE_CASE__ )
return {"em": em}
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
return model_prefix.startswith('''rag''' )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
snake_case_ = '''dropout_rate'''
for p in extra_params:
if getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if not hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and not hasattr(SCREAMING_SNAKE_CASE__ , equivalent_param[p] ):
logger.info('''config doesn\'t have a `{}` attribute'''.format(SCREAMING_SNAKE_CASE__ ) )
delattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
continue
snake_case_ = p if hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else equivalent_param[p]
setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
delattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    return hparams, config
import math


def res(x, y):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
        raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
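# Hand-checked example of the comparison above: for 2^10 vs 10^2,
# res(2, 10) = 10 * log10(2) ~= 3.0103 while res(10, 2) = 2 * log10(10) = 2,
# so 2^10 (= 1024) is correctly reported as larger than 10^2 (= 100).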
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, TensorType

logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'''vocab_file''': '''spiece.model'''}
lowerCAmelCase_ = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
lowerCAmelCase_ = {
'''google/bigbird-roberta-base''': 40_96,
'''google/bigbird-roberta-large''': 40_96,
'''google/bigbird-base-trivia-itc''': 40_96,
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : List[Any] = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE : List[int] = []
def __init__( self : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict="<unk>" , _UpperCamelCase : List[str]="<s>" , _UpperCamelCase : Tuple="</s>" , _UpperCamelCase : Any="<pad>" , _UpperCamelCase : Any="[SEP]" , _UpperCamelCase : Optional[Any]="[MASK]" , _UpperCamelCase : Any="[CLS]" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Dict , ) ->None:
snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else bos_token
snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else eos_token
snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else unk_token
snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else pad_token
snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else cls_token
snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , sep_token=_UpperCamelCase , mask_token=_UpperCamelCase , cls_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
snake_case_ = vocab_file
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCamelCase )
@property
def snake_case__( self : str ) ->List[Any]:
return self.sp_model.get_piece_size()
def snake_case__( self : int ) ->Union[str, Any]:
snake_case_ = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ) ->Any:
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self : str , _UpperCamelCase : List[Any] ) ->List[str]:
snake_case_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case__( self : Optional[int] , _UpperCamelCase : str ) ->List[str]:
return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
def snake_case__( self : str , _UpperCamelCase : List[str] ) ->Tuple:
return self.sp_model.piece_to_id(_UpperCamelCase )
def snake_case__( self : Union[str, Any] , _UpperCamelCase : str ) ->List[Any]:
snake_case_ = self.sp_model.IdToPiece(_UpperCamelCase )
return token
def snake_case__( self : Dict , _UpperCamelCase : Optional[int] ) ->List[str]:
snake_case_ = []
snake_case_ = ''''''
snake_case_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCamelCase ) + token
snake_case_ = True
snake_case_ = []
else:
current_sub_tokens.append(_UpperCamelCase )
snake_case_ = False
out_string += self.sp_model.decode(_UpperCamelCase )
return out_string.strip()
def snake_case__( self : int , _UpperCamelCase : List[int] , _UpperCamelCase : bool = False , _UpperCamelCase : bool = None , _UpperCamelCase : bool = True , **_UpperCamelCase : List[str] , ) ->str:
snake_case_ = kwargs.pop('''use_source_tokenizer''' , _UpperCamelCase )
snake_case_ = self.convert_ids_to_tokens(_UpperCamelCase , skip_special_tokens=_UpperCamelCase )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
snake_case_ = []
snake_case_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) )
snake_case_ = []
sub_texts.append(_UpperCamelCase )
else:
current_sub_text.append(_UpperCamelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
snake_case_ = re.sub(R''' (\[(MASK|SEP)\])''' , R'''\1''' , ''' '''.join(_UpperCamelCase ) )
else:
snake_case_ = ''''''.join(_UpperCamelCase )
snake_case_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
snake_case_ = self.clean_up_tokenization(_UpperCamelCase )
return clean_text
else:
return text
def snake_case__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
if not os.path.isdir(_UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ = os.path.join(
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , '''wb''' ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
def snake_case__( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ = [self.cls_token_id]
snake_case_ = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case__( self : List[str] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) ->List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCamelCase )) + [1]
return [1] + ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
def snake_case__( self : List[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case_ ( __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = XLMTokenizer
SCREAMING_SNAKE_CASE : Union[str, Any] = False
def snake_case__( self : List[str] ) ->Tuple:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
snake_case_ = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
snake_case_ = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(_UpperCamelCase ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(_UpperCamelCase ) )
def snake_case__( self : Any , _UpperCamelCase : int ) ->Any:
snake_case_ = '''lower newer'''
snake_case_ = '''lower newer'''
return input_text, output_text
def snake_case__( self : Dict ) ->List[Any]:
snake_case_ = XLMTokenizer(self.vocab_file , self.merges_file )
snake_case_ = '''lower'''
snake_case_ = ['''low''', '''er</w>''']
snake_case_ = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = tokens + ['''<unk>''']
snake_case_ = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , _UpperCamelCase )
@slow
def snake_case__( self : Any ) ->Dict:
snake_case_ = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
snake_case_ = tokenizer.encode('''sequence builders''' , add_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase )
snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase )
assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """Incremental sieve: yields primes indefinitely using a factor map."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
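# Reasoning sketch (reconstructed; not stated in the source): this solves
# Project Euler 123. For the n-th prime p, ((p - 1)^n + (p + 1)^n) mod p^2
# equals 2 when n is even and 2 * n * p when n is odd, so only odd n need
# checking and the search stops at the first n where 2 * p * n exceeds the
# 1e10 limit.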
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCAmelCase_ = '''pt'''
elif is_tf_available():
lowerCAmelCase_ = '''tf'''
else:
lowerCAmelCase_ = '''jax'''
class snake_case_ ( __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = ByTaTokenizer
SCREAMING_SNAKE_CASE : str = False
def snake_case__( self : Optional[Any] ) ->Any:
super().setUp()
snake_case_ = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case__( self : List[str] ) ->Any:
return ByTaTokenizer.from_pretrained('''google/byt5-small''' )
def snake_case__( self : str , **_UpperCamelCase : Optional[Any] ) ->ByTaTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def snake_case__( self : str , _UpperCamelCase : Any , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : str=2_0 , _UpperCamelCase : Any=5 ) ->Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
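        # For instance, bytes([0x80]).decode("utf-8") raises UnicodeDecodeError,
        # which is why the decode attempt below is wrapped in a try/except.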
snake_case_ = []
for i in range(len(_UpperCamelCase ) ):
try:
snake_case_ = tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCamelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
snake_case_ = list(filter(lambda _UpperCamelCase : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , _UpperCamelCase ) )
snake_case_ = list(filter(lambda _UpperCamelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_UpperCamelCase ) , _UpperCamelCase ) )
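        # Keep only IDs whose token is plain ASCII text and that encode back to
        # exactly themselves, so every surviving ID is independently decodable.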
if max_length is not None and len(_UpperCamelCase ) > max_length:
snake_case_ = toks[:max_length]
if min_length is not None and len(_UpperCamelCase ) < min_length and len(_UpperCamelCase ) > 0:
while len(_UpperCamelCase ) < min_length:
snake_case_ = toks + toks
# toks_str = [t[1] for t in toks]
snake_case_ = [t[0] for t in toks]
# Ensure consistency
snake_case_ = tokenizer.decode(_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase )
if " " not in output_txt and len(_UpperCamelCase ) > 1:
snake_case_ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCamelCase )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCamelCase )
)
if with_prefix_space:
snake_case_ = ''' ''' + output_txt
snake_case_ = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
return output_txt, output_ids
def snake_case__( self : Optional[int] ) ->Optional[Any]:
snake_case_ = self.ta_base_tokenizer
snake_case_ = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] )
snake_case_ = tokenizer(['''hi''', '''I went to the gym''', ''''''] )
self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''] )
def snake_case__( self : int ) ->Union[str, Any]:
snake_case_ = self.ta_base_tokenizer
snake_case_ = '''Unicode €.'''
snake_case_ = tokenizer(_UpperCamelCase )
snake_case_ = [8_8, 1_1_3, 1_0_8, 1_0_2, 1_1_4, 1_0_3, 1_0_4, 3_5, 2_2_9, 1_3_3, 1_7_5, 4_9, 1]
self.assertEqual(encoded['''input_ids'''] , _UpperCamelCase )
# decoding
snake_case_ = tokenizer.decode(_UpperCamelCase )
self.assertEqual(_UpperCamelCase , '''Unicode €.</s>''' )
snake_case_ = tokenizer('''e è é ê ë''' )
snake_case_ = [1_0_4, 3_5, 1_9_8, 1_7_1, 3_5, 1_9_8, 1_7_2, 3_5, 1_9_8, 1_7_3, 3_5, 1_9_8, 1_7_4, 1]
self.assertEqual(encoded['''input_ids'''] , _UpperCamelCase )
# decoding
snake_case_ = tokenizer.decode(_UpperCamelCase )
self.assertEqual(_UpperCamelCase , '''e è é ê ë</s>''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''e è é ê ë</s>''' )
def snake_case__( self : List[Any] ) ->Tuple:
snake_case_ = self.ta_base_tokenizer
snake_case_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
snake_case_ = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 1, 0]
# fmt: on
snake_case_ = tokenizer(_UpperCamelCase , padding=_UpperCamelCase , return_tensors=_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
if FRAMEWORK != "jax":
snake_case_ = list(batch.input_ids.numpy()[0] )
else:
snake_case_ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual((2, 3_7) , batch.input_ids.shape )
self.assertEqual((2, 3_7) , batch.attention_mask.shape )
def snake_case__( self : Union[str, Any] ) ->Any:
snake_case_ = self.ta_base_tokenizer
snake_case_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
snake_case_ = tokenizer(_UpperCamelCase , padding=_UpperCamelCase , return_tensors=_UpperCamelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , _UpperCamelCase )
self.assertIn('''attention_mask''' , _UpperCamelCase )
self.assertNotIn('''decoder_input_ids''' , _UpperCamelCase )
self.assertNotIn('''decoder_attention_mask''' , _UpperCamelCase )
def snake_case__( self : Dict ) ->List[str]:
snake_case_ = self.ta_base_tokenizer
snake_case_ = [
'''Summary of the text.''',
'''Another summary.''',
]
snake_case_ = tokenizer(
text_target=_UpperCamelCase , max_length=3_2 , padding='''max_length''' , truncation=_UpperCamelCase , return_tensors=_UpperCamelCase )
self.assertEqual(3_2 , targets['''input_ids'''].shape[1] )
def snake_case__( self : Tuple ) ->Tuple:
snake_case_ = self.ta_base_tokenizer
snake_case_ = ['''A long paragraph for summarization. </s>''']
snake_case_ = ['''Summary of the text. </s>''']
# fmt: off
snake_case_ = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 3_5, 1]
snake_case_ = [8_6, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_2_4, 3_5, 1_1_4, 1_0_5, 3_5, 1_1_9, 1_0_7, 1_0_4, 3_5, 1_1_9, 1_0_4, 1_2_3, 1_1_9, 4_9, 3_5, 1]
# fmt: on
snake_case_ = tokenizer(_UpperCamelCase , text_target=_UpperCamelCase )
self.assertEqual(_UpperCamelCase , batch['''input_ids'''][0] )
self.assertEqual(_UpperCamelCase , batch['''labels'''][0] )
def snake_case__( self : Optional[Any] ) ->int:
# safety check on max_len default value so we are sure the test works
snake_case_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
snake_case_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case_ = tempfile.mkdtemp()
snake_case_ = ''' He is very happy, UNwant\u00E9d,running'''
snake_case_ = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
tokenizer.save_pretrained(_UpperCamelCase )
snake_case_ = tokenizer.__class__.from_pretrained(_UpperCamelCase )
snake_case_ = after_tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
shutil.rmtree(_UpperCamelCase )
snake_case_ = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case_ = tempfile.mkdtemp()
snake_case_ = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
snake_case_ = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
snake_case_ = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
tokenizer.save_pretrained(_UpperCamelCase )
snake_case_ = tokenizer.__class__.from_pretrained(_UpperCamelCase )
snake_case_ = after_tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
snake_case_ = tokenizer.__class__.from_pretrained(_UpperCamelCase , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(_UpperCamelCase )
def snake_case__( self : Union[str, Any] ) ->List[Any]:
snake_case_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCamelCase )
with open(os.path.join(_UpperCamelCase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
snake_case_ = json.load(_UpperCamelCase )
with open(os.path.join(_UpperCamelCase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
snake_case_ = json.load(_UpperCamelCase )
snake_case_ = [f'''<extra_id_{i}>''' for i in range(1_2_5 )]
snake_case_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
snake_case_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(_UpperCamelCase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(_UpperCamelCase , _UpperCamelCase )
with open(os.path.join(_UpperCamelCase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(_UpperCamelCase , _UpperCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
snake_case_ = tokenizer_class.from_pretrained(
_UpperCamelCase , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
snake_case_ = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=_UpperCamelCase )]
snake_case_ = tokenizer_class.from_pretrained(
_UpperCamelCase , additional_special_tokens=_UpperCamelCase , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def snake_case__( self : str ) ->List[str]:
snake_case_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCamelCase )
snake_case_ = tokenizer_class.from_pretrained(_UpperCamelCase )
self.assertTrue(tokenizer.decode([2_5_5] ) == '''''' )
def snake_case__( self : Tuple ) ->Optional[int]:
pass
def snake_case__( self : int ) ->str:
pass
def snake_case__( self : Tuple ) ->str:
pass
def snake_case__( self : int ) ->Optional[Any]:
pass
def snake_case__( self : Tuple ) ->Dict:
        # The default common tokenizer tests use tokens that are invalid for ByT5,
        # which only accepts one-character strings and special added tokens
snake_case_ = self.get_tokenizers(fast=_UpperCamelCase , do_lower_case=_UpperCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
snake_case_ = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
snake_case_ = tokenizer.convert_tokens_to_string(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : List[Any] ) ->Tuple:
snake_case_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
snake_case_ = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
snake_case_ = 0
snake_case_ = tokenizer.convert_ids_to_tokens(
_UpperCamelCase , skip_special_tokens=_UpperCamelCase )
for attr in attributes_list:
setattr(_UpperCamelCase , attr + '''_id''' , _UpperCamelCase )
self.assertEqual(getattr(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(getattr(_UpperCamelCase , attr + '''_id''' ) , _UpperCamelCase )
setattr(_UpperCamelCase , attr + '''_id''' , _UpperCamelCase )
self.assertEqual(getattr(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(getattr(_UpperCamelCase , attr + '''_id''' ) , _UpperCamelCase )
setattr(_UpperCamelCase , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(_UpperCamelCase , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(_UpperCamelCase , '''additional_special_tokens_ids''' ) , [] )
setattr(_UpperCamelCase , '''additional_special_tokens_ids''' , [token_id_to_test_setters] )
self.assertListEqual(getattr(_UpperCamelCase , '''additional_special_tokens''' ) , [token_to_test_setters] )
self.assertListEqual(getattr(_UpperCamelCase , '''additional_special_tokens_ids''' ) , [token_id_to_test_setters] ) | 8 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
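# Lazy import structure: heavy submodules are only imported on first attribute
# access, so importing the package stays cheap when optional backends (torch,
# TensorFlow, Flax) are not installed.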
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 8 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 8 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = "philschmid/bart-large-cnn-samsum"
SCREAMING_SNAKE_CASE : Tuple = (
"This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
"and returns a summary of the text."
)
SCREAMING_SNAKE_CASE : str = "summarizer"
SCREAMING_SNAKE_CASE : str = AutoTokenizer
SCREAMING_SNAKE_CASE : str = AutoModelForSeqaSeqLM
SCREAMING_SNAKE_CASE : Optional[int] = ["text"]
SCREAMING_SNAKE_CASE : Optional[int] = ["text"]
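    # Minimal usage sketch (hypothetical names; the tool class above is the
    # callable entry point via PipelineTool.__call__):
    #   tool = SummarizerTool()  # assumed instance of the class defined above
    #   print(tool("Some long English text ..."))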
def snake_case__( self : str , _UpperCamelCase : int ) ->Optional[int]:
return self.pre_processor(_UpperCamelCase , return_tensors='''pt''' , truncation=_UpperCamelCase )
def snake_case__( self : Tuple , _UpperCamelCase : Optional[int] ) ->Tuple:
return self.model.generate(**_UpperCamelCase )[0]
def snake_case__( self : Optional[Any] , _UpperCamelCase : Optional[int] ) ->Any:
return self.pre_processor.decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase ) | 8 | 1 |
from __future__ import annotations
import os
from collections.abc import Mapping
lowerCAmelCase_ = tuple[int, int]
class snake_case_ :
'''simple docstring'''
def __init__( self : Dict , _UpperCamelCase : set[int] , _UpperCamelCase : Mapping[EdgeT, int] ) ->None:
snake_case_ = vertices
snake_case_ = {
(min(_UpperCamelCase ), max(_UpperCamelCase )): weight for edge, weight in edges.items()
}
def snake_case__( self : Union[str, Any] , _UpperCamelCase : EdgeT , _UpperCamelCase : int ) ->None:
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
snake_case_ = weight
def snake_case__( self : Dict ) ->Graph:
snake_case_ = Graph({min(self.vertices )} , {} )
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
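        # Prim's algorithm: repeatedly add the cheapest edge crossing the cut
        # between the growing subgraph and the remaining vertices.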
while len(subgraph.vertices ) < len(self.vertices ):
snake_case_ = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
snake_case_ = edge
snake_case_ = weight
subgraph.add_edge(_UpperCamelCase , _UpperCamelCase )
return subgraph
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = "p107_network.txt" ):
snake_case_ = os.path.abspath(os.path.dirname(SCREAMING_SNAKE_CASE__ ) )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ = {}
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
with open(SCREAMING_SNAKE_CASE__ ) as f:
snake_case_ = f.read().strip().split('''\n''' )
snake_case_ = [line.split(''',''' ) for line in data]
for edgea in range(1 , len(SCREAMING_SNAKE_CASE__ ) ):
for edgea in range(SCREAMING_SNAKE_CASE__ ):
            if adjacency_matrix[edgea][edgea] != "-":
                snake_case_ = int(adjacency_matrix[edgea][edgea] )
snake_case_ = Graph(set(range(len(SCREAMING_SNAKE_CASE__ ) ) ) , SCREAMING_SNAKE_CASE__ )
snake_case_ = graph.prims_algorithm()
snake_case_ = sum(graph.edges.values() )
snake_case_ = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f"""{solution() = }""") | 8 |
from collections import deque
from .hash_table import HashTable
class snake_case_ ( __A ):
'''simple docstring'''
def __init__( self : int , *_UpperCamelCase : int , **_UpperCamelCase : Tuple ) ->Tuple:
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
def snake_case__( self : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Dict ) ->Tuple:
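        # Separate chaining: each slot holds a deque of values; appendleft puts
        # the newest value at the front of its chain.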
snake_case_ = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(_UpperCamelCase )
snake_case_ = self.values[key]
def snake_case__( self : List[Any] ) ->str:
return (
sum(self.charge_factor - len(_UpperCamelCase ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def snake_case__( self : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int]=None ) ->str:
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(_UpperCamelCase ) == 0
):
return key
return super()._collision_resolution(_UpperCamelCase , _UpperCamelCase ) | 8 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = "fnet"
def __init__( self : List[Any] , _UpperCamelCase : Tuple=3_2_0_0_0 , _UpperCamelCase : Optional[Any]=7_6_8 , _UpperCamelCase : Union[str, Any]=1_2 , _UpperCamelCase : str=3_0_7_2 , _UpperCamelCase : Optional[int]="gelu_new" , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : Union[str, Any]=5_1_2 , _UpperCamelCase : Tuple=4 , _UpperCamelCase : Tuple=0.02 , _UpperCamelCase : Tuple=1e-12 , _UpperCamelCase : List[str]=False , _UpperCamelCase : Union[str, Any]=5_1_2 , _UpperCamelCase : Any=3 , _UpperCamelCase : str=1 , _UpperCamelCase : List[Any]=2 , **_UpperCamelCase : List[str] , ) ->Union[str, Any]:
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
snake_case_ = vocab_size
snake_case_ = max_position_embeddings
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = initializer_range
snake_case_ = type_vocab_size
snake_case_ = layer_norm_eps
snake_case_ = use_tpu_fourier_optimizations
snake_case_ = tpu_short_seq_length | 8 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = len(SCREAMING_SNAKE_CASE__ )
    # We need a solutions matrix of the same size to record the path taken.
snake_case_ = [[0 for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )]
snake_case_ = run_maze(SCREAMING_SNAKE_CASE__ , 0 , 0 , SCREAMING_SNAKE_CASE__ )
if solved:
print('''\n'''.join(str(SCREAMING_SNAKE_CASE__ ) for row in solutions ) )
else:
print('''No solution exists!''' )
return solved
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = len(SCREAMING_SNAKE_CASE__ )
    # Base case: we have reached the bottom-right destination cell.
if i == j == (size - 1):
snake_case_ = 1
return True
snake_case_ = (not i < 0) and (not j < 0) # Check lower bounds
snake_case_ = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
snake_case_ = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
snake_case_ = 1
# check for directions
if (
run_maze(SCREAMING_SNAKE_CASE__ , i + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
or run_maze(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , j + 1 , SCREAMING_SNAKE_CASE__ )
or run_maze(SCREAMING_SNAKE_CASE__ , i - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
or run_maze(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , j - 1 , SCREAMING_SNAKE_CASE__ )
):
return True
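            # Backtrack: no direction from this cell reaches the goal, so
            # unmark it before returning.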
snake_case_ = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
def _a ( a :int = 10 , a :int = 22 ) -> int:
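    # An n-digit nth power requires base < 10, since 10**n already has n + 1
    # digits; powers above 21 cannot work because even 9**22 has only 21 digits.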
a = range(1 , a )
a = range(1 , a )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f"""{solution(10, 22) = }""")
| 0 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
raise TypeError('''Undefined for non-integers''' )
elif precision < 1:
raise ValueError('''Undefined for non-natural numbers''' )
snake_case_ = precision
snake_case_ = ceil(precision / 14 )
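    # Chudnovsky series: each term contributes roughly 14 correct decimal
    # digits, hence ceil(precision / 14) iterations; pi = constant_term / partial_sum.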
snake_case_ = 426880 * Decimal(10005 ).sqrt()
snake_case_ = 1
snake_case_ = 13591409
snake_case_ = Decimal(SCREAMING_SNAKE_CASE__ )
for k in range(1 , SCREAMING_SNAKE_CASE__ ):
snake_case_ = factorial(6 * k ) // (factorial(3 * k ) * factorial(SCREAMING_SNAKE_CASE__ ) ** 3)
linear_term += 545140134
exponential_term *= -262537412640768000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
lowerCAmelCase_ = 50
print(f"""The first {n} digits of pi is: {pi(n)}""") | 8 | 0 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class __A ( unittest.TestCase , UpperCamelCase__ ):
def _lowercase (self : Any ):
UpperCAmelCase_ = load_tool("text-classification" )
self.tool.setup()
UpperCAmelCase_ = load_tool("text-classification" , remote=__a )
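        # remote=True is expected to route calls through a hosted inference
        # endpoint rather than running the model locally; both variants are
        # exercised below.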
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__a , "positive" )
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__a , "positive" )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__a , "positive" )
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__a , "positive" )
| 1 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class snake_case_ ( __A ):
'''simple docstring'''
def __init__( self : int , _UpperCamelCase : pyspark.sql.DataFrame , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : bool = True , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : str = None , _UpperCamelCase : bool = True , _UpperCamelCase : str = "arrow" , **_UpperCamelCase : Tuple , ) ->str:
super().__init__(
split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , **_UpperCamelCase , )
snake_case_ = load_from_cache_file
snake_case_ = file_format
snake_case_ = Spark(
df=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , working_dir=_UpperCamelCase , **_UpperCamelCase , )
def snake_case__( self : int ) ->Tuple:
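        # Streaming serves examples straight from the Spark DataFrame; the
        # non-streaming path materializes the dataset into the cache first
        # (honoring load_from_cache_file) and then loads it back.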
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
snake_case_ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=_UpperCamelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split ) | 8 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowerCamelCase : Optional[Any] = 'hf-internal-testing/tiny-random-bert'
lowerCamelCase : Optional[int] = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
lowerCamelCase : Optional[Any] = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ = cached_file(UpperCamelCase , UpperCamelCase )
        # The file should have been downloaded into this cache directory
self.assertTrue(os.path.isdir(UpperCamelCase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(UpperCamelCase , UpperCamelCase ) ) )
with open(os.path.join(UpperCamelCase , '''refs''' , '''main''' ) ) as f:
lowercase__ = f.read()
self.assertEqual(UpperCamelCase , os.path.join(UpperCamelCase , '''snapshots''' , UpperCamelCase , UpperCamelCase ) )
self.assertTrue(os.path.isfile(UpperCamelCase ) )
# File is cached at the same place the second time.
lowercase__ = cached_file(UpperCamelCase , UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
# Using a specific revision to test the full commit hash.
lowercase__ = cached_file(UpperCamelCase , UpperCamelCase , revision='''9b8c223''' )
self.assertEqual(UpperCamelCase , os.path.join(UpperCamelCase , '''snapshots''' , UpperCamelCase , UpperCamelCase ) )
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(UpperCamelCase , '''is not a valid model identifier''' ):
lowercase__ = cached_file('''tiny-random-bert''' , UpperCamelCase )
with self.assertRaisesRegex(UpperCamelCase , '''is not a valid git identifier''' ):
lowercase__ = cached_file(UpperCamelCase , UpperCamelCase , revision='''aaaa''' )
with self.assertRaisesRegex(UpperCamelCase , '''does not appear to have a file named''' ):
lowercase__ = cached_file(UpperCamelCase , '''conf''' )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
with self.assertRaisesRegex(UpperCamelCase , '''does not appear to have a file named''' ):
lowercase__ = cached_file(UpperCamelCase , '''conf''' )
with open(os.path.join(UpperCamelCase , '''refs''' , '''main''' ) ) as f:
lowercase__ = f.read()
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase , '''.no_exist''' , UpperCamelCase , '''conf''' ) ) )
lowercase__ = cached_file(UpperCamelCase , '''conf''' , _raise_exceptions_for_missing_entries=UpperCamelCase )
self.assertIsNone(UpperCamelCase )
lowercase__ = cached_file(UpperCamelCase , '''conf''' , local_files_only=UpperCamelCase , _raise_exceptions_for_missing_entries=UpperCamelCase )
self.assertIsNone(UpperCamelCase )
lowercase__ = mock.Mock()
lowercase__ = 500
lowercase__ = {}
lowercase__ = HTTPError
lowercase__ = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=UpperCamelCase ) as mock_head:
lowercase__ = cached_file(UpperCamelCase , '''conf''' , _raise_exceptions_for_connection_errors=UpperCamelCase )
self.assertIsNone(UpperCamelCase )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ (self : Dict ):
'''simple docstring'''
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCamelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCamelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCamelCase ) )
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(UpperCamelCase , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , UpperCamelCase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(UpperCamelCase , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , UpperCamelCase , revision='''ahaha''' )
lowercase__ = get_file_from_repo('''bert-base-cased''' , UpperCamelCase )
# The name is the cached name which is not very easy to test, so instead we load the content.
lowercase__ = json.loads(open(UpperCamelCase , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 768 )
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ = Path(UpperCamelCase ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(UpperCamelCase , '''a.txt''' ) , str(UpperCamelCase ) )
self.assertIsNone(get_file_from_repo(UpperCamelCase , '''b.txt''' ) )
| 2 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
lowerCAmelCase_ = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''DPTFeatureExtractor''']
lowerCAmelCase_ = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 8 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase : Any = {
'configuration_vision_text_dual_encoder': ['VisionTextDualEncoderConfig'],
'processing_vision_text_dual_encoder': ['VisionTextDualEncoderProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Tuple = ['VisionTextDualEncoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Any = ['FlaxVisionTextDualEncoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[Any] = ['TFVisionTextDualEncoderModel']
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowercase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 3 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase_ = {
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase_ = {
'''unc-nlp/lxmert-base-uncased''': 5_12,
}
lowerCAmelCase_ = {
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Any = LxmertTokenizer
def __init__( self : Union[str, Any] , _UpperCamelCase : int=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Dict=True , _UpperCamelCase : Any="[UNK]" , _UpperCamelCase : Tuple="[SEP]" , _UpperCamelCase : List[Any]="[PAD]" , _UpperCamelCase : Union[str, Any]="[CLS]" , _UpperCamelCase : str="[MASK]" , _UpperCamelCase : List[str]=True , _UpperCamelCase : List[str]=None , **_UpperCamelCase : List[str] , ) ->Any:
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , )
snake_case_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
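        # Keep the fast (Rust) backend's normalizer in sync with the Python-side
        # options in case the serialized tokenizer disagrees with the kwargs.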
if (
normalizer_state.get('''lowercase''' , _UpperCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _UpperCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _UpperCamelCase ) != tokenize_chinese_chars
):
snake_case_ = getattr(_UpperCamelCase , normalizer_state.pop('''type''' ) )
snake_case_ = do_lower_case
snake_case_ = strip_accents
snake_case_ = tokenize_chinese_chars
snake_case_ = normalizer_class(**_UpperCamelCase )
snake_case_ = do_lower_case
def snake_case__( self : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str]=None ) ->List[Any]:
snake_case_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case__( self : int , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__( self : Any , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
snake_case_ = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase ) | 8 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__snake_case =random.Random()
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Dict=1.0 , lowerCamelCase : Dict=None , lowerCamelCase : str=None ):
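    # Build a shape[0] x shape[1] list of random floats scaled by `scale`,
    # defaulting to the shared module-level RNG above.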
if rng is None:
lowerCAmelCase = global_rng
lowerCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase_ ( unittest.TestCase ):
def __init__( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : str=4_0_0 , UpperCAmelCase__ : List[Any]=2_0_0_0 , UpperCAmelCase__ : Dict=1 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : List[Any]=1_6_0_0_0 , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Tuple=True , ) -> List[Any]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = min_seq_length
lowerCAmelCase = max_seq_length
lowerCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase = feature_size
lowerCAmelCase = padding_value
lowerCAmelCase = sampling_rate
lowerCAmelCase = return_attention_mask
lowerCAmelCase = do_normalize
def __UpperCAmelCase ( self : Any ) -> List[Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : Optional[int]=False ) -> Dict:
def _flatten(UpperCAmelCase__ : Tuple ):
return list(itertools.chain(*UpperCAmelCase__ ) )
if equal_length:
lowerCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCAmelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for x in speech_inputs]
return speech_inputs
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : Tuple = WavaVecaFeatureExtractor
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
lowerCAmelCase = WavaVecaFeatureExtractionTester(self )
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
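        # Normalized audio should be approximately zero-mean and unit-variance
        # along the time axis (within a 1e-3 tolerance).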
self.assertTrue(np.all(np.mean(UpperCAmelCase__ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(UpperCAmelCase__ , axis=0 ) - 1 ) < 1E-3 ) )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
        # Tests that all calls wrap to encode_plus and batch_encode_plus
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test batched
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
lowerCAmelCase = np.asarray(UpperCAmelCase__ )
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = ['longest', 'max_length', 'do_not_pad']
lowerCAmelCase = [None, 1_6_0_0, None]
for max_length, padding in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase = feat_extract(UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors='np' )
lowerCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def __UpperCAmelCase ( self : int ) -> str:
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = range(8_0_0 , 1_4_0_0 , 2_0_0 )
lowerCAmelCase = [floats_list((1, x) )[0] for x in lengths]
lowerCAmelCase = ['longest', 'max_length', 'do_not_pad']
lowerCAmelCase = [None, 1_6_0_0, None]
for max_length, padding in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase = feat_extract(UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding=UpperCAmelCase__ )
lowerCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = feat_extract(
UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=1_0_0_0 , padding='max_length' , return_tensors='np' )
lowerCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __UpperCAmelCase ( self : Any ) -> Tuple:
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = feat_extract(
UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=1_0_0_0 , padding='longest' , return_tensors='np' )
lowerCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = feat_extract(
UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=2_0_0_0 , padding='longest' , return_tensors='np' )
lowerCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
@require_torch
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
import torch
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = np.random.rand(1_0_0 ).astype(np.floataa )
lowerCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def __UpperCAmelCase ( self : Any ) -> Optional[int]:
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
lowerCAmelCase = WavaVecaConfig.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase__ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == 'layer' )
| 4 |
import math
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = 10001 ):
try:
snake_case_ = int(SCREAMING_SNAKE_CASE__ )
except (TypeError, ValueError):
raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
if nth <= 0:
raise ValueError('''Parameter nth must be greater than or equal to one.''' )
snake_case_ = []
snake_case_ = 2
while len(SCREAMING_SNAKE_CASE__ ) < nth:
if is_prime(SCREAMING_SNAKE_CASE__ ):
primes.append(SCREAMING_SNAKE_CASE__ )
num += 1
else:
num += 1
return primes[len(SCREAMING_SNAKE_CASE__ ) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""") | 8 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
UpperCAmelCase__ = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> Optional[Any]:
"""simple docstring"""
inspect_dataset(__snake_case , __snake_case )
_lowercase =path + '''.py'''
assert script_name in os.listdir(__snake_case )
assert "__pycache__" not in os.listdir(__snake_case )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> Any:
"""simple docstring"""
inspect_metric(__snake_case , __snake_case )
_lowercase =path + '''.py'''
assert script_name in os.listdir(__snake_case )
assert "__pycache__" not in os.listdir(__snake_case )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> Tuple:
"""simple docstring"""
_lowercase =get_dataset_config_info(__snake_case , config_name=__snake_case )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> int:
"""simple docstring"""
with pytest.raises(__snake_case ):
get_dataset_config_info(__snake_case , config_name=__snake_case )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> Optional[int]:
"""simple docstring"""
_lowercase =get_dataset_config_names(__snake_case )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> List[str]:
"""simple docstring"""
_lowercase =get_dataset_infos(__snake_case )
assert list(infos.keys() ) == expected_configs
_lowercase =expected_configs[0]
assert expected_config in infos
_lowercase =infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> Tuple:
"""simple docstring"""
_lowercase =get_dataset_infos(__snake_case )
assert expected_config in infos
_lowercase =infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> Union[str, Any]:
"""simple docstring"""
with pytest.raises(__snake_case ):
get_dataset_split_names(__snake_case , config_name=__snake_case )
| 5 |
from sklearn.metrics import mean_squared_error
import datasets
lowerCAmelCase_ = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
lowerCAmelCase_ = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
lowerCAmelCase_ = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared)
        return {"mse": mse} | 8 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __A( a , unittest.TestCase ):
snake_case_ = ShapEImgaImgPipeline
snake_case_ = ['''image''']
snake_case_ = ['''image''']
snake_case_ = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
snake_case_ = False
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
return 32
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
return 32
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
return 8
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
__a = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__a = CLIPVisionModel(_snake_case )
return model
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = CLIPImageProcessor(
crop_size=224 , do_center_crop=_snake_case , do_normalize=_snake_case , do_resize=_snake_case , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , )
return image_processor
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
__a = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
__a = PriorTransformer(**_snake_case )
return model
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
__a = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
__a = ShapERenderer(**_snake_case )
return model
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
__a = self.dummy_prior
__a = self.dummy_image_encoder
__a = self.dummy_image_processor
__a = self.dummy_renderer
__a = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1_024 , prediction_type='''sample''' , use_karras_sigmas=_snake_case , clip_sample=_snake_case , clip_sample_range=1.0 , )
__a = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=0 ) -> List[Any]:
'''simple docstring'''
__a = floats_tensor((1, 3, 64, 64) , rng=random.Random(_snake_case ) ).to(_snake_case )
if str(_snake_case ).startswith('''mps''' ):
__a = torch.manual_seed(_snake_case )
else:
__a = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
__a = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
__a = '''cpu'''
__a = self.get_dummy_components()
__a = self.pipeline_class(**_snake_case )
__a = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
__a = pipe(**self.get_dummy_inputs(_snake_case ) )
__a = output.images[0]
__a = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__a = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
__a = torch_device == '''cpu'''
__a = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_snake_case , relax_max_difference=_snake_case , )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = self.get_dummy_components()
__a = self.pipeline_class(**_snake_case )
__a = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
__a = 1
__a = 2
__a = self.get_dummy_inputs(_snake_case )
for key in inputs.keys():
if key in self.batch_params:
__a = batch_size * [inputs[key]]
__a = pipe(**_snake_case , num_images_per_prompt=_snake_case )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __A( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
__a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
__a = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
__a = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
__a = torch.Generator(device=_snake_case ).manual_seed(0 )
__a = pipe(
_snake_case , generator=_snake_case , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_snake_case , _snake_case ) | 6 |
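# The seeded-generator pattern used throughout the tests above, in isolation
# (a sketch assuming torch is installed; the tensors are stand-ins, not pipeline outputs):
import torch
generator = torch.Generator(device="cpu").manual_seed(0)
sample_a = torch.randn(2, 2, generator=generator)
sample_b = torch.randn(2, 2, generator=torch.Generator(device="cpu").manual_seed(0))
assert torch.equal(sample_a, sample_b)  # same seed, same device -> identical draws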
def permute(nums):
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result
def permutea(nums):
    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack
    output = []
    backtrack(0)
    return output
if __name__ == "__main__":
    import doctest
    # use res to print the data in permute2 function
    res = permutea([1, 2, 3])
    print(res)
    doctest.testmod() | 8 | 0 |
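# Quick cross-check (a sketch, not part of the module above): both
# implementations enumerate all 3! orderings of [1, 2, 3].
perms_pop = permute([1, 2, 3])
perms_backtrack = permutea([1, 2, 3])
assert len(perms_backtrack) == 6
assert sorted(perms_pop) == sorted(perms_backtrack)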
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Union[str, Any],lowercase_ : str,lowercase_ : Optional[int]=3,lowercase_ : Optional[Any]=3_2,lowercase_ : str=3,lowercase_ : List[str]=1_0,lowercase_ : List[Any]=[1_0, 2_0, 3_0, 4_0],lowercase_ : Dict=[1, 1, 2, 1],lowercase_ : List[str]=True,lowercase_ : Tuple=True,lowercase_ : Optional[Any]="relu",lowercase_ : Tuple=3,lowercase_ : Any=None,)-> Tuple:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = image_size
A__ = num_channels
A__ = embeddings_size
A__ = hidden_sizes
A__ = depths
A__ = is_training
A__ = use_labels
A__ = hidden_act
A__ = num_labels
A__ = scope
A__ = len(lowercase_ )
def snake_case__ ( self : Any )-> List[Any]:
'''simple docstring'''
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = self.get_config()
return config, pixel_values
def snake_case__ ( self : Any )-> Dict:
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels,embeddings_size=self.embeddings_size,hidden_sizes=self.hidden_sizes,depths=self.depths,hidden_act=self.hidden_act,num_labels=self.num_labels,image_size=self.image_size,)
def snake_case__ ( self : List[Any],lowercase_ : Optional[Any],lowercase_ : List[str] )-> List[str]:
'''simple docstring'''
A__ = FlaxRegNetModel(config=lowercase_ )
A__ = model(lowercase_ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape,(self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2),)
def snake_case__ ( self : Any,lowercase_ : int,lowercase_ : List[str] )-> Optional[Any]:
'''simple docstring'''
A__ = self.num_labels
A__ = FlaxRegNetForImageClassification(config=lowercase_ )
A__ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def snake_case__ ( self : Tuple )-> str:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ , A__ = config_and_inputs
A__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def snake_case__ ( self : Tuple )-> None:
'''simple docstring'''
A__ = FlaxRegNetModelTester(self )
A__ = ConfigTester(self,config_class=lowercase_,has_text_modality=lowercase_ )
def snake_case__ ( self : Union[str, Any] )-> List[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self : List[str] )-> int:
'''simple docstring'''
return
def snake_case__ ( self : str )-> Union[str, Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def snake_case__ ( self : str )-> Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def snake_case__ ( self : int )-> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def snake_case__ ( self : Optional[Any] )-> str:
'''simple docstring'''
pass
def snake_case__ ( self : int )-> Dict:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(lowercase_ )
A__ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1],lowercase_ )
def snake_case__ ( self : Optional[Any] )-> Optional[Any]:
'''simple docstring'''
def check_hidden_states_output(lowercase_ : List[str],lowercase_ : List[Any],lowercase_ : Optional[int] ):
A__ = model_class(lowercase_ )
A__ = model(**self._prepare_for_class(lowercase_,lowercase_ ) )
A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A__ = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ),expected_num_stages + 1 )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(lowercase_,lowercase_,lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(lowercase_,lowercase_,lowercase_ )
def snake_case__ ( self : Tuple )-> Tuple:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A__ = self._prepare_for_class(lowercase_,lowercase_ )
A__ = model_class(lowercase_ )
@jax.jit
def model_jitted(lowercase_ : int,**lowercase_ : Dict ):
return model(pixel_values=lowercase_,**lowercase_ )
with self.subTest('JIT Enabled' ):
A__ = model_jitted(**lowercase_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
A__ = model_jitted(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ),len(lowercase_ ) )
for jitted_output, output in zip(lowercase_,lowercase_ ):
self.assertEqual(jitted_output.shape,output.shape )
def _snake_case( ) -> Dict:
'''simple docstring'''
A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class A ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case__ ( self : Dict )-> Optional[int]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def snake_case__ ( self : Union[str, Any] )-> str:
'''simple docstring'''
A__ = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=lowercase_,return_tensors='np' )
A__ = model(**lowercase_ )
# verify the logits
A__ = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape,lowercase_ )
A__ = jnp.array([-0.4_180, -1.5_051, -3.4_836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3],lowercase_,atol=1E-4 ) )
| 7 |
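# The JIT-vs-eager comparison exercised in the test above, reduced to a
# minimal sketch (assuming jax is installed; fn is a stand-in, not a
# transformers model):
import jax
import jax.numpy as jnp
def fn(x):
    return jnp.dot(x, x.T)
x = jnp.ones((4, 4))
jitted_out = jax.jit(fn)(x)
with jax.disable_jit():
    eager_out = fn(x)
assert jitted_out.shape == eager_out.shape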
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm'''] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm_fast'''] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xglm'''] = [
        '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XGLMForCausalLM''',
        '''XGLMModel''',
        '''XGLMPreTrainedModel''',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xglm'''] = [
        '''FlaxXGLMForCausalLM''',
        '''FlaxXGLMModel''',
        '''FlaxXGLMPreTrainedModel''',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xglm'''] = [
        '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXGLMForCausalLM''',
        '''TFXGLMModel''',
        '''TFXGLMPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 8 | 0 |
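# A generic, minimal version of the lazy-import idea wired above, using a
# PEP 562 module-level __getattr__ -- a sketch, not transformers' _LazyModule:
import importlib
_lazy_map = {"sqrt": "math"}  # attribute name -> module that provides it
def __getattr__(name):
    if name in _lazy_map:
        return getattr(importlib.import_module(_lazy_map[name]), name)
    raise AttributeError(name)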
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    job_info = {}
    start = job['''started_at''']
    end = job['''completed_at''']
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info['''started_at'''] = start
    job_info['''completed_at'''] = end
    job_info['''duration'''] = duration_in_min
    return job_info
def get_job_time(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F'''Bearer {token}'''}
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job['''name''']: extract_time_from_single_job(job) for job in result['''jobs''']})
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + F'''&page={i + 2}''', headers=headers).json()
            job_time.update({job['''name''']: extract_time_from_single_job(job) for job in result['''jobs''']})
        return job_time
    except Exception:
        print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''')
        return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
    for k, v in job_time.items():
        print(f"""{k}: {v["duration"]}""")
| 9 |
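# Quick check of the duration arithmetic above, assuming python-dateutil is
# installed; the timestamps are made-up ISO-8601 strings:
import dateutil.parser as date_parser
start = date_parser.parse("2023-01-01T10:00:00Z")
end = date_parser.parse("2023-01-01T10:41:00Z")
print(round((end - start).total_seconds() / 60.0))  # 41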
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    '''simple docstring'''
    _backends = ['''note_seq''']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''note_seq'''])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['''note_seq'''])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['''note_seq''']) | 8 | 0 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCAmelCase_ ( __a , __a = "cpu" , __a = None ) -> None:
"""simple docstring"""
lowerCamelCase__: int =torch.load(__a , map_location=__a )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__a , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
lowerCamelCase__: Union[str, Any] =v.half()
if save_path is None: # overwrite src_path
lowerCamelCase__: List[str] =src_path
torch.save(__a , __a )
if __name__ == "__main__":
fire.Fire(convert)
| 10 |
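# The core of the conversion above, in isolation (a sketch assuming torch is
# installed; the state dict is a stand-in, not a real checkpoint):
import torch
state_dict = {"weight": torch.ones(2, 2)}
state_dict = {k: v.half() for k, v in state_dict.items()}
assert state_dict["weight"].dtype == torch.float16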
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "vit_msn"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias | 8 | 0 |
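# Instantiation sketch, assuming a transformers version that ships
# ViTMSNConfig; the overrides are arbitrary:
from transformers import ViTMSNConfig
config = ViTMSNConfig(hidden_size=384, num_hidden_layers=6)
print(config.model_type)  # vit_msn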
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/mbart-large-en-ro': (
            'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
        ),
        'facebook/mbart-large-cc25': (
            'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
        ),
    },
    'tokenizer_file': {
        'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
        'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/mbart-large-en-ro': 1024,
    'facebook/mbart-large-cc25': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class MBartTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token,
            sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens, **kwargs)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def set_tgt_lang_special_tokens(self, lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 11 |
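# Minimal usage sketch of the fast tokenizer above, assuming transformers is
# installed and the pretrained files can be downloaded:
from transformers import MBartTokenizerFast
tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro")
tok.src_lang = "en_XX"  # triggers set_src_lang_special_tokens
batch = tok("Hello world", return_tensors="pt")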
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 0 |
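# Worked example of f = 1 / (2*pi*sqrt(L*C)): with L = 10 mH and C = 100 nF,
# the resonant frequency is about 5.03 kHz.
name, freq = resonant_frequency(10e-3, 100e-9)
print(name, round(freq, 1))  # Resonant frequency 5032.9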
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def simple_accuracy(preds, labels):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, """sklearn""")
    return (preds == labels).mean()
def acc_and_f1(preds, labels):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, """sklearn""")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds, labels):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, """sklearn""")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name, preds, labels):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, """sklearn""")
    assert len(preds) == len(labels), f'Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}'
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, """sklearn""")
    if len(preds) != len(labels):
        raise ValueError(f'Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}')
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
| 12 |
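# Smoke test of the helpers above, assuming scikit-learn and numpy are
# installed (a FutureWarning about the deprecation is expected):
import numpy as np
preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
print(simple_accuracy(preds, labels))                      # 0.75
print(glue_compute_metrics("mrpc", preds, labels)["acc"])  # 0.75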
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
    def test_evaluate_assign(self):
        code = '''x = 3'''
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {'''x''': 3})
        code = '''x = y'''
        state = {'''y''': 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {'''x''': 5, '''y''': 5})
    def test_evaluate_call(self):
        code = '''y = add_two(x)'''
        state = {'''x''': 3}
        result = evaluate(code, {'''add_two''': add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {'''x''': 3, '''y''': 5})
        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out
    def test_evaluate_constant(self):
        code = '''x = 3'''
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {'''x''': 3})
    def test_evaluate_dict(self):
        code = '''test_dict = {\'x\': x, \'y\': add_two(x)}'''
        state = {'''x''': 3}
        result = evaluate(code, {'''add_two''': add_two}, state=state)
        self.assertDictEqual(result, {'''x''': 3, '''y''': 5})
        self.assertDictEqual(state, {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}})
    def test_evaluate_expression(self):
        code = '''x = 3\ny = 5'''
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {'''x''': 3, '''y''': 5})
    def test_evaluate_f_string(self):
        code = '''text = f\'This is x: {x}.\''''
        state = {'''x''': 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {'''x''': 3, '''text''': '''This is x: 3.'''})
    def test_evaluate_if(self):
        code = '''if x <= 3:\n y = 2\nelse:\n y = 5'''
        state = {'''x''': 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {'''x''': 3, '''y''': 2})
        state = {'''x''': 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {'''x''': 8, '''y''': 5})
    def test_evaluate_list(self):
        code = '''test_list = [x, add_two(x)]'''
        state = {'''x''': 3}
        result = evaluate(code, {'''add_two''': add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {'''x''': 3, '''test_list''': [3, 5]})
    def test_evaluate_name(self):
        code = '''y = x'''
        state = {'''x''': 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {'''x''': 3, '''y''': 3})
    def test_evaluate_subscript(self):
        code = '''test_list = [x, add_two(x)]\ntest_list[1]'''
        state = {'''x''': 3}
        result = evaluate(code, {'''add_two''': add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {'''x''': 3, '''test_list''': [3, 5]})
        code = '''test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'''
        state = {'''x''': 3}
        result = evaluate(code, {'''add_two''': add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}})
    def test_evaluate_for(self):
        code = '''x = 0\nfor i in range(3):\n x = i'''
        state = {}
        result = evaluate(code, {'''range''': range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {'''x''': 2, '''i''': 2}) | 8 | 0 |
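# The interpreter API under test, in isolation -- a sketch assuming a
# transformers version that ships transformers.tools:
from transformers.tools.python_interpreter import evaluate
state = {}
result = evaluate("x = 1 + 2", {}, state=state)
print(result, state)  # 3 {'x': 3}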
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 13 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Any , _UpperCamelCase : Any , _UpperCamelCase : Tuple ) ->List[Any]:
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(_UpperCamelCase ) for s in shape] )}.npy'''
def snake_case__( self : Any ) ->List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def snake_case__( self : int , _UpperCamelCase : Union[str, Any]=0 , _UpperCamelCase : int=(4, 4, 6_4, 6_4) , _UpperCamelCase : Optional[int]=False ) ->Tuple:
snake_case_ = jnp.bfloataa if fpaa else jnp.floataa
snake_case_ = jnp.array(load_hf_numpy(self.get_file_format(_UpperCamelCase , _UpperCamelCase ) ) , dtype=_UpperCamelCase )
return image
def snake_case__( self : List[Any] , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : Optional[int]="CompVis/stable-diffusion-v1-4" ) ->Optional[Any]:
snake_case_ = jnp.bfloataa if fpaa else jnp.floataa
snake_case_ = '''bf16''' if fpaa else None
snake_case_, snake_case_ = FlaxUNetaDConditionModel.from_pretrained(
_UpperCamelCase , subfolder='''unet''' , dtype=_UpperCamelCase , revision=_UpperCamelCase )
return model, params
def snake_case__( self : Dict , _UpperCamelCase : List[Any]=0 , _UpperCamelCase : Tuple=(4, 7_7, 7_6_8) , _UpperCamelCase : List[Any]=False ) ->int:
snake_case_ = jnp.bfloataa if fpaa else jnp.floataa
snake_case_ = jnp.array(load_hf_numpy(self.get_file_format(_UpperCamelCase , _UpperCamelCase ) ) , dtype=_UpperCamelCase )
return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[1_7, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_0_0_0, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def snake_case__( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] ) ->Union[str, Any]:
snake_case_, snake_case_ = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=_UpperCamelCase )
snake_case_ = self.get_latents(_UpperCamelCase , fpaa=_UpperCamelCase )
snake_case_ = self.get_encoder_hidden_states(_UpperCamelCase , fpaa=_UpperCamelCase )
snake_case_ = model.apply(
{'''params''': params} , _UpperCamelCase , jnp.array(_UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=_UpperCamelCase , ).sample
assert sample.shape == latents.shape
snake_case_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
snake_case_ = jnp.array(_UpperCamelCase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[1_7, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_0_0_0, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def snake_case__( self : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str ) ->Dict:
snake_case_, snake_case_ = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=_UpperCamelCase )
snake_case_ = self.get_latents(_UpperCamelCase , shape=(4, 4, 9_6, 9_6) , fpaa=_UpperCamelCase )
snake_case_ = self.get_encoder_hidden_states(_UpperCamelCase , shape=(4, 7_7, 1_0_2_4) , fpaa=_UpperCamelCase )
snake_case_ = model.apply(
{'''params''': params} , _UpperCamelCase , jnp.array(_UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=_UpperCamelCase , ).sample
assert sample.shape == latents.shape
snake_case_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
snake_case_ = jnp.array(_UpperCamelCase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-2 ) | 8 | 0 |
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude, angle, radian_mode=False) -> list[float]:
    """simple docstring"""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def in_static_equilibrium(forces, location, eps=10**-1) -> bool:
    """simple docstring"""
    moments = cross(location, forces)
    sum_moments = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 14 |
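# Minimal check of the moment computation used above: a single 10 N upward
# force applied 2 m from the origin produces a 20 N*m moment.
from numpy import array, cross
print(cross(array([[2, 0]]), array([[0, 10]])))  # [20]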
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def should_reduce_batch_size(exception):
    _statements = [
        '''CUDA out of memory.''', # CUDA OOM
        '''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU
        '''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
return False
def find_executable_batch_size(function=None, starting_batch_size=128):
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
    batch_size = starting_batch_size
    def decorator(*args, **kwargs):
        nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ''', '''.join([F'''{arg}={value}''' for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                F'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
                F'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''')
while True:
if batch_size == 0:
raise RuntimeError('''No executable batch size found, reached zero.''' )
try:
                return function(batch_size, *args, **kwargs)
except Exception as e:
                if should_reduce_batch_size(e):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator | 8 | 0 |
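# Hedged usage sketch of the decorator above; training_function is a stand-in
# that simulates OOM for batch sizes above 8:
@find_executable_batch_size(starting_batch_size=32)
def training_function(batch_size):
    if batch_size > 8:
        raise RuntimeError("CUDA out of memory.")  # matched by should_reduce_batch_size
    return batch_size
print(training_function())  # halves 32 -> 16 -> 8 and returns 8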
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph, vert, visited) -> list[int]:
    """simple docstring"""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph, vert, visited) -> list[int]:
    """simple docstring"""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph) -> list[list[int]]:
    """simple docstring"""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
| 15 |
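# Usage sketch on the sample graphs defined above (component ordering may vary):
print(strongly_connected_components(test_graph_1))  # e.g. [[0, 1, 2], [3], [4]]
print(strongly_connected_components(test_graph_2))  # e.g. [[0, 1, 2], [3, 4, 5]]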
from __future__ import annotations
def encode(plain):
    return [ord(elem) - 96 for elem in plain]
def decode(encoded):
    return "".join(chr(elem + 96) for elem in encoded)
def main():
    encoded = encode(input('''-> ''').strip().lower())
    print('''Encoded: ''', encoded)
    print('''Decoded:''', decode(encoded))
if __name__ == "__main__":
main() | 8 | 0 |
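# Round-trip sketch: letters map to 1..26 and back.
assert encode("hello") == [8, 5, 12, 12, 15]
assert decode(encode("hello")) == "hello"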
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_layoutlmv3': [
        'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'LayoutLMv3Config',
        'LayoutLMv3OnnxConfig',
    ],
    'processing_layoutlmv3': ['LayoutLMv3Processor'],
    'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutlmv3_fast'] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_layoutlmv3'] = [
        'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LayoutLMv3ForQuestionAnswering',
        'LayoutLMv3ForSequenceClassification',
        'LayoutLMv3ForTokenClassification',
        'LayoutLMv3Model',
        'LayoutLMv3PreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_layoutlmv3'] = [
        'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFLayoutLMv3ForQuestionAnswering',
        'TFLayoutLMv3ForSequenceClassification',
        'TFLayoutLMv3ForTokenClassification',
        'TFLayoutLMv3Model',
        'TFLayoutLMv3PreTrainedModel',
    ]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_layoutlmv3'] = ['LayoutLMv3FeatureExtractor']
    _import_structure['image_processing_layoutlmv3'] = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 |
import math
def res(x, y):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError('''This should never happen''')
if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = '''Enter the base and the power separated by a comma: '''
    xa, ya = map(int, input(prompt).split(''','''))
    xb, yb = map(int, input(prompt).split(''','''))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)
    # We check for the largest number
    if resa > resb:
        print('''Largest number is''', xa, '''^''', ya)
    elif resb > resa:
        print('''Largest number is''', xb, '''^''', yb)
    else:
        print('''Both are equal''') | 8 | 0 |
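# Sanity check of the log comparison: 2^10 (=1024) beats 10^2 (=100).
print(res(2, 10), res(10, 2))  # ~3.0103 vs 2.0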
"""simple docstring"""
class EditDistance:
    """simple docstring"""
    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []
    def __min_dist_top_down_dp(self, m, n):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert_char = self.__min_dist_top_down_dp(m, n - 1)
                delete_char = self.__min_dist_top_down_dp(m - 1, n)
                replace_char = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert_char, delete_char, replace_char)
            return self.dp[m][n]
    def min_dist_top_down(self, word1, word2):
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)
    def min_dist_bottom_up(self, word1, word2):
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert_char = self.dp[i][j - 1]
                    delete_char = self.dp[i - 1][j]
                    replace_char = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert_char, delete_char, replace_char)
        return self.dp[m][n]
if __name__ == "__main__":
    solver = EditDistance()
    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()
    Sa = input('Enter the first string: ').strip()
    Sb = input('Enter the second string: ').strip()
    print()
    print(F"The minimum edit distance is: {solver.min_dist_top_down(Sa, Sb)}")
    print(F"The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sb)}")
    print()
    print('*************** End of Testing Edit Distance DP Algorithm ***************')
| 17 |
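# Non-interactive check of both solvers: the classic pair needs 3 edits.
solver = EditDistance()
assert solver.min_dist_top_down("kitten", "sitting") == 3
assert solver.min_dist_bottom_up("kitten", "sitting") == 3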
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
        '''google/bigbird-roberta-large''': (
            '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
        ),
        '''google/bigbird-base-trivia-itc''': (
            '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''google/bigbird-roberta-base''': 4096,
    '''google/bigbird-roberta-large''': 4096,
    '''google/bigbird-base-trivia-itc''': 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token,
            sep_token=sep_token, mask_token=mask_token, cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)
    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
def snake_case__( self : int , _UpperCamelCase : List[int] , _UpperCamelCase : bool = False , _UpperCamelCase : bool = None , _UpperCamelCase : bool = True , **_UpperCamelCase : List[str] , ) ->str:
snake_case_ = kwargs.pop('''use_source_tokenizer''' , _UpperCamelCase )
snake_case_ = self.convert_ids_to_tokens(_UpperCamelCase , skip_special_tokens=_UpperCamelCase )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
snake_case_ = []
snake_case_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) )
snake_case_ = []
sub_texts.append(_UpperCamelCase )
else:
current_sub_text.append(_UpperCamelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
snake_case_ = re.sub(R''' (\[(MASK|SEP)\])''' , R'''\1''' , ''' '''.join(_UpperCamelCase ) )
else:
snake_case_ = ''''''.join(_UpperCamelCase )
snake_case_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
snake_case_ = self.clean_up_tokenization(_UpperCamelCase )
return clean_text
else:
return text
def snake_case__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
if not os.path.isdir(_UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ = os.path.join(
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , '''wb''' ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
def snake_case__( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ = [self.cls_token_id]
snake_case_ = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case__( self : List[str] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) ->List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCamelCase )) + [1]
return [1] + ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
def snake_case__( self : List[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] | 8 | 0 |
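# Editor's note: a hedged usage sketch for the SentencePiece tokenizer above.
# It assumes the mangled class corresponds to `transformers.BigBirdTokenizer`
# and that the hub checkpoint is reachable; treat it as illustrative only.
# from transformers import BigBirdTokenizer
# tok = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
# enc = tok('Block-sparse attention scales to long inputs.')
# print(enc['input_ids'])             # [CLS] ... [SEP] ids wrap the sequence
# print(tok.decode(enc['input_ids']))  # round-trips through the spiece model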
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__lowerCamelCase : Tuple = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = {
'''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''',
}
# fmt: off
__lowerCamelCase : List[str] = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
__lowerCamelCase : Union[str, Any] = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class WhisperConfig( PretrainedConfig ):
    model_type = 'whisper'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self, vocab_size=5_1865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=5_0257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448, pad_token_id=5_0256, bos_token_id=5_0256, eos_token_id=5_0256, suppress_tokens=None, begin_suppress_tokens=[220, 5_0256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs, ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs, )
class WhisperOnnxConfig( OnnxSeqaSeqConfigWithPast ):
    @property
    def inputs( self ):
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ] )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs" )
        return common_inputs
    def generate_dummy_inputs( self, preprocessor : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size : int = -1, seq_length : int = -1, is_pair : bool = False, framework : Optional["TensorType"] = None, sampling_rate : int = 2_2050, time_duration : float = 5.0, frequency : int = 220, ):
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency, )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features" )
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids" )
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values" )
        return dummy_inputs
    @property
    def atol_for_validation( self ):
        return 1E-3
| 18 |
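# Editor's note: a quick sketch of how the two classes above are consumed,
# assuming they correspond to `transformers.WhisperConfig` and its ONNX export
# config (names in this dump were mangled, so this is illustrative only).
# from transformers import WhisperConfig
# cfg = WhisperConfig()  # defaults: 80 mel bins, 51865-token vocabulary
# assert cfg.num_mel_bins == 80 and cfg.vocab_size == 51865
# assert cfg.hidden_size == cfg.d_model  # aliased through `attribute_map`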
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incremental sieve: lazily yields the primes 2, 3, 5, ..."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: reschedule its recorded prime factor.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` is prime: its square is the first composite to mark.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1E10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Skip the next prime: for even n the remainder is always 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution()) | 8 | 0 |
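# Editor's note: why `2 * prime * n` is a valid shortcut above. Expanding the
# binomials modulo p**2 gives (p - 1)**n + (p + 1)**n ≡ 2*n*p (mod p**2) for
# odd n (and ≡ 2 for even n, which is why every other prime is skipped).
# A small self-check with hand-picked (n, n-th prime) pairs:
for n, p in [(3, 5), (7, 17), (11, 31)]:
    assert ((p - 1) ** n + (p + 1) ** n) % (p * p) == (2 * n * p) % (p * p)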
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = BlipImageProcessor()
lowerCamelCase_ = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
lowerCamelCase_ = BlipaProcessor(lowercase , lowercase )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_( self , **lowercase ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer
def SCREAMING_SNAKE_CASE_( self , **lowercase ) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCamelCase_ = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
lowerCamelCase_ = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(lowercase , return_tensors="np" )
lowerCamelCase_ = processor(images=lowercase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = "lower newer"
lowerCamelCase_ = processor(text=lowercase )
lowerCamelCase_ = tokenizer(lowercase , return_token_type_ids=lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = "lower newer"
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=lowercase , images=lowercase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(lowercase )
lowerCamelCase_ = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = "lower newer"
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=lowercase , images=lowercase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 19 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_opt'''] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_opt'''] = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_opt'''] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 8 | 0 |
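# Editor's note: a dependency-free sketch of the lazy-import pattern used
# above. `DemoLazyModule` is a stand-in, not the transformers implementation;
# the real `_LazyModule` also handles TYPE_CHECKING, __dir__, pickling, etc.
import importlib
import types


class DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported symbol back to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        # Import the owning submodule only on first attribute access.
        module = importlib.import_module('.' + self._symbol_to_module[symbol], self.__name__)
        return getattr(module, symbol)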
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
lowercase : Optional[Any] = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
lowercase : Optional[Any] = get_tests_dir("""fixtures/vocab.json""")
lowercase : int = get_tests_dir("""fixtures""")
class __snake_case ( unittest.TestCase ):
_a : Union[str, Any]= ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = 0
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Union[str, Any] = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(snake_case ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Optional[int] = WavaVecaConfig()
lowercase : Any = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(snake_case )
processor.save_pretrained(snake_case )
lowercase : Optional[Any] = AutoProcessor.from_pretrained(snake_case )
self.assertIsInstance(snake_case ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(snake_case ,os.path.join(snake_case ,snake_case ) )
copyfile(snake_case ,os.path.join(snake_case ,"""vocab.json""" ) )
lowercase : str = AutoProcessor.from_pretrained(snake_case )
self.assertIsInstance(snake_case ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Optional[int] = WavaVecaFeatureExtractor()
lowercase : Optional[int] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowercase : List[str] = WavaVecaProcessor(snake_case ,snake_case )
# save in new folder
processor.save_pretrained(snake_case )
# drop `processor_class` in tokenizer
with open(os.path.join(snake_case ,snake_case ) ,"""r""" ) as f:
lowercase : Optional[int] = json.load(snake_case )
config_dict.pop("""processor_class""" )
with open(os.path.join(snake_case ,snake_case ) ,"""w""" ) as f:
f.write(json.dumps(snake_case ) )
lowercase : str = AutoProcessor.from_pretrained(snake_case )
self.assertIsInstance(snake_case ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : List[str] = WavaVecaFeatureExtractor()
lowercase : List[Any] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowercase : List[str] = WavaVecaProcessor(snake_case ,snake_case )
# save in new folder
processor.save_pretrained(snake_case )
# drop `processor_class` in feature extractor
with open(os.path.join(snake_case ,snake_case ) ,"""r""" ) as f:
lowercase : str = json.load(snake_case )
config_dict.pop("""processor_class""" )
with open(os.path.join(snake_case ,snake_case ) ,"""w""" ) as f:
f.write(json.dumps(snake_case ) )
lowercase : Tuple = AutoProcessor.from_pretrained(snake_case )
self.assertIsInstance(snake_case ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Tuple = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(snake_case )
# copy relevant files
copyfile(snake_case ,os.path.join(snake_case ,"""vocab.json""" ) )
# create emtpy sample processor
with open(os.path.join(snake_case ,snake_case ) ,"""w""" ) as f:
f.write("""{}""" )
lowercase : str = AutoProcessor.from_pretrained(snake_case )
self.assertIsInstance(snake_case ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
with self.assertRaises(snake_case ):
lowercase : Optional[int] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case ):
lowercase : Tuple = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" ,trust_remote_code=snake_case )
lowercase : Dict = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" ,trust_remote_code=snake_case )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ ,"""NewProcessor""" )
lowercase : Optional[Any] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ ,"""NewFeatureExtractor""" )
lowercase : Optional[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,"""NewTokenizerFast""" )
# Test we can also load the slow version
lowercase : Optional[Any] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" ,trust_remote_code=snake_case ,use_fast=snake_case )
lowercase : List[Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ ,"""NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ ,"""NewTokenizer""" )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
try:
AutoConfig.register("""custom""" ,snake_case )
AutoFeatureExtractor.register(snake_case ,snake_case )
AutoTokenizer.register(snake_case ,slow_tokenizer_class=snake_case )
AutoProcessor.register(snake_case ,snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case ):
AutoProcessor.register(snake_case ,snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
lowercase : Tuple = CustomFeatureExtractor.from_pretrained(snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase : List[str] = os.path.join(snake_case ,"""vocab.txt""" )
with open(snake_case ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowercase : int = CustomTokenizer(snake_case )
lowercase : Union[str, Any] = CustomProcessor(snake_case ,snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(snake_case )
lowercase : str = AutoProcessor.from_pretrained(snake_case )
self.assertIsInstance(snake_case ,snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
class __snake_case ( lowerCAmelCase ):
_a : List[Any]= False
class __snake_case ( lowerCAmelCase ):
_a : Optional[int]= False
class __snake_case ( lowerCAmelCase ):
_a : List[Any]= "AutoFeatureExtractor"
_a : Union[str, Any]= "AutoTokenizer"
_a : str= False
try:
AutoConfig.register("""custom""" ,snake_case )
AutoFeatureExtractor.register(snake_case ,snake_case )
AutoTokenizer.register(snake_case ,slow_tokenizer_class=snake_case )
AutoProcessor.register(snake_case ,snake_case )
# If remote code is not set, the default is to use local classes.
lowercase : Optional[Any] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ ,"""NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
lowercase : Dict = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" ,trust_remote_code=snake_case )
self.assertEqual(processor.__class__.__name__ ,"""NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
lowercase : Union[str, Any] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" ,trust_remote_code=snake_case )
self.assertEqual(processor.__class__.__name__ ,"""NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Dict = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ ,"""BertTokenizerFast""" )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ ,"""ConvNextImageProcessor""" )
@is_staging_test
class __snake_case ( unittest.TestCase ):
_a : Dict= ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def _SCREAMING_SNAKE_CASE ( cls ):
'''simple docstring'''
lowercase : List[Any] = TOKEN
HfFolder.save_token(snake_case )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = WavaVecaProcessor.from_pretrained(snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(snake_case ,"""test-processor""" ) ,push_to_hub=snake_case ,use_auth_token=self._token )
lowercase : List[str] = WavaVecaProcessor.from_pretrained(f"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(snake_case ,getattr(new_processor.feature_extractor ,snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() ,processor.tokenizer.get_vocab() )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = WavaVecaProcessor.from_pretrained(snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(snake_case ,"""test-processor-org""" ) ,push_to_hub=snake_case ,use_auth_token=self._token ,organization="""valid_org""" ,)
lowercase : Optional[int] = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(snake_case ,getattr(new_processor.feature_extractor ,snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() ,processor.tokenizer.get_vocab() )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
lowercase : Dict = CustomFeatureExtractor.from_pretrained(snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase : List[Any] = os.path.join(snake_case ,"""vocab.txt""" )
with open(snake_case ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowercase : List[Any] = CustomTokenizer(snake_case )
lowercase : Optional[Any] = CustomProcessor(snake_case ,snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"{USER}/test-dynamic-processor" ,token=self._token )
lowercase : Dict = Repository(snake_case ,clone_from=f"{USER}/test-dynamic-processor" ,token=self._token )
processor.save_pretrained(snake_case )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map ,{
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} ,)
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(snake_case ,"""tokenizer_config.json""" ) ) as f:
lowercase : Dict = json.load(snake_case )
self.assertDictEqual(
tokenizer_config["""auto_map"""] ,{
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} ,)
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(snake_case ,"""custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(snake_case ,"""custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(snake_case ,"""custom_processing.py""" ) ) )
repo.push_to_hub()
lowercase : Any = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor" ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ ,"""CustomProcessor""" )
| 20 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True) | 8 | 0 |
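# Editor's note: hedged usage sketch. Assumes the class above is
# `transformers.tools.TextSummarizationTool`; `PipelineTool` instances are
# callable, chaining encode -> forward -> decode. It needs the checkpoint
# download, so it is left commented out.
# summarizer = TextSummarizationTool()
# print(summarizer('Long meeting transcript to condense goes here ...'))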
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[str] = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class _lowerCamelCase( _a ):
lowercase_ : Dict = """deformable_detr"""
lowercase_ : int = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self, lowerCamelCase=True, lowerCamelCase=None, lowerCamelCase=3, lowerCamelCase=3_00, lowerCamelCase=10_24, lowerCamelCase=6, lowerCamelCase=10_24, lowerCamelCase=8, lowerCamelCase=6, lowerCamelCase=10_24, lowerCamelCase=8, lowerCamelCase=0.0, lowerCamelCase=True, lowerCamelCase="relu", lowerCamelCase=2_56, lowerCamelCase=0.1, lowerCamelCase=0.0, lowerCamelCase=0.0, lowerCamelCase=0.0_2, lowerCamelCase=1.0, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase="sine", lowerCamelCase="resnet50", lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=4, lowerCamelCase=4, lowerCamelCase=4, lowerCamelCase=False, lowerCamelCase=3_00, lowerCamelCase=False, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=1, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=0.1, lowerCamelCase=0.2_5, lowerCamelCase=False, **lowerCamelCase, ) -> Optional[int]:
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
_lowercase : List[str] = CONFIG_MAPPING['resnet'](out_features=['stage4'])
elif isinstance(lowerCamelCase, lowerCamelCase):
_lowercase : List[str] = backbone_config.get('model_type')
_lowercase : str = CONFIG_MAPPING[backbone_model_type]
_lowercase : Optional[int] = config_class.from_dict(lowerCamelCase)
_lowercase : Tuple = use_timm_backbone
_lowercase : List[str] = backbone_config
_lowercase : Tuple = num_channels
_lowercase : Optional[Any] = num_queries
_lowercase : Optional[Any] = max_position_embeddings
_lowercase : Optional[int] = d_model
_lowercase : int = encoder_ffn_dim
_lowercase : List[Any] = encoder_layers
_lowercase : str = encoder_attention_heads
_lowercase : str = decoder_ffn_dim
_lowercase : Optional[Any] = decoder_layers
_lowercase : List[str] = decoder_attention_heads
_lowercase : Optional[int] = dropout
_lowercase : Optional[Any] = attention_dropout
_lowercase : int = activation_dropout
_lowercase : Any = activation_function
_lowercase : Optional[int] = init_std
_lowercase : int = init_xavier_std
_lowercase : Union[str, Any] = encoder_layerdrop
_lowercase : Tuple = auxiliary_loss
_lowercase : Union[str, Any] = position_embedding_type
_lowercase : str = backbone
_lowercase : List[Any] = use_pretrained_backbone
_lowercase : Any = dilation
# deformable attributes
_lowercase : Any = num_feature_levels
_lowercase : Dict = encoder_n_points
_lowercase : Dict = decoder_n_points
_lowercase : Dict = two_stage
_lowercase : Union[str, Any] = two_stage_num_proposals
_lowercase : str = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.')
# Hungarian matcher
_lowercase : Tuple = class_cost
_lowercase : int = bbox_cost
_lowercase : Optional[int] = giou_cost
# Loss coefficients
_lowercase : Optional[Any] = mask_loss_coefficient
_lowercase : Dict = dice_loss_coefficient
_lowercase : Tuple = bbox_loss_coefficient
_lowercase : Optional[int] = giou_loss_coefficient
_lowercase : Union[str, Any] = eos_coefficient
_lowercase : Union[str, Any] = focal_alpha
_lowercase : Dict = disable_custom_kernels
super().__init__(is_encoder_decoder=lowerCamelCase, **lowerCamelCase)
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return self.d_model
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = copy.deepcopy(self.__dict__)
if self.backbone_config is not None:
_lowercase : Union[str, Any] = self.backbone_config.to_dict()
_lowercase : Tuple = self.__class__.model_type
return output
| 21 |
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Chain collisions: each occupied slot holds a deque, newest value first.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data) | 8 | 0 |
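# Editor's note: illustrative usage of the chained hash table above. The
# `HashTable(size_table, charge_factor=...)` constructor and `insert_data`
# method are assumed from the sibling module and are not shown in this file.
# ht = HashTableWithLinkedList(3, charge_factor=2)
# for value in (10, 20, 30):
#     ht.insert_data(value)
# print(ht.values)  # colliding keys chain in a deque, newest value first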
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :Dict = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config( lowerCAmelCase_ ):
    model_type = """mobilenet_v2"""
    def __init__( self, num_channels=3, image_size=2_2_4, depth_multiplier=1.0, depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=3_2, first_layer_is_expansion=True, finegrained_output=True, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.8, initializer_range=0.0_2, layer_norm_eps=0.0_0_1, semantic_loss_ignore_index=2_5_5, **kwargs, ):
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs( self ):
        return OrderedDict([("pixel_values", {0: "batch"})] )
    @property
    def outputs( self ):
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
    @property
    def atol_for_validation( self ):
        return 1e-4
| 22 |
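# Editor's note: the `depth_multiplier` / `depth_divisible_by` / `min_depth`
# fields above feed the standard MobileNet channel-rounding rule. A sketch of
# that rule (it mirrors the widely used `make_divisible` helper; the function
# itself is not part of the config file):
def make_divisible(value, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:  # never shrink a layer by more than 10%
        new_value += divisor
    return new_value


assert make_divisible(32 * 0.75) == 24 and make_divisible(24 * 0.35) == 8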
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create a solution object to save the path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print('\n'.join(str(row) for row in solutions))
    else:
        print('No solution exists!')
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and blocked points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark as visited
            solutions[i][j] = 1

            # check all four directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 8 | 0 |
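# Editor's note: a quick demonstration input for the solver above
# (0 = open cell, 1 = wall; start is (0, 0), goal is the bottom-right cell).
# demo_maze = [
#     [0, 1, 0, 1],
#     [0, 0, 0, 1],
#     [1, 0, 1, 0],
#     [1, 0, 0, 0],
# ]
# solve_maze(demo_maze)  # prints a 0/1 matrix tracing one path, returns True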
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__: int = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class SCREAMING_SNAKE_CASE( A__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = XLMProphetNetTokenizer
lowerCamelCase__ = False
lowerCamelCase__ = True
def A ( self : Dict ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : Dict = XLMProphetNetTokenizer(__snake_case , keep_accents=__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def A ( self : Any ) -> Tuple:
UpperCAmelCase : Any = '''[PAD]'''
UpperCAmelCase : Optional[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def A ( self : Optional[Any] ) -> Any:
UpperCAmelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(__snake_case ) , 1012 )
def A ( self : int ) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def A ( self : Tuple ) -> int:
UpperCAmelCase : Optional[Any] = XLMProphetNetTokenizer(__snake_case , keep_accents=__snake_case )
UpperCAmelCase : List[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase : Union[str, Any] = tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
UpperCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def A ( self : str ) -> List[Any]:
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def A ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase : int = '''Hello World!'''
UpperCAmelCase : str = [35389, 6672, 49, 2]
self.assertListEqual(__snake_case , self.big_tokenizer.encode(__snake_case ) )
@slow
def A ( self : Dict ) -> Union[str, Any]:
# fmt: off
UpperCAmelCase : int = {'''input_ids''': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 23 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Chudnovsky series: return the first `precision` digits of pi as a string."""
    if not isinstance(precision, int):
        raise TypeError('Undefined for non-integers')
    elif precision < 1:
        raise ValueError('Undefined for non-natural numbers')

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # each term adds ~14 correct digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"""The first {n} digits of pi is: {pi(n)}""") | 8 | 0 |
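# Editor's note: a cheap cross-check of the Chudnovsky routine above against
# the double-precision constant from the standard library (first digits only).
# import math
# assert pi(20).startswith('3.14159265358979')
# assert str(math.pi).startswith(pi(15)[:10])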
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swiftformer'] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 24 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a pyspark DataFrame."""

    def __init__( self, df: pyspark.sql.DataFrame, split: Optional[NamedSplit] = None, features: Optional[Features] = None, streaming: bool = True, cache_dir: str = None, keep_in_memory: bool = False, working_dir: str = None, load_from_cache_file: bool = True, file_format: str = "arrow", **kwargs, ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs, )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs, )

    def read( self ):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format, )
        return self.builder.as_dataset(split=self.split ) | 8 | 0 |
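# Editor's note: illustrative only — it needs a live pyspark session. Assumes
# the class above is `datasets.io.spark.SparkDatasetReader`, the reader behind
# `datasets.Dataset.from_spark`.
# from pyspark.sql import SparkSession
# spark = SparkSession.builder.getOrCreate()
# df = spark.createDataFrame([('hello',), ('world',)], ['text'])
# ds = SparkDatasetReader(df, streaming=False).read()
# print(ds[0])  # {'text': 'hello'}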
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("""The number of nodes should be the same as the number of coins""" )

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
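# A small worked example for the distribution function above: in the tree
# TreeNode(0, TreeNode(0), TreeNode(3)), two coins move from the right leaf to
# the root and one of them continues on to the left leaf, so 3 moves in total.
#
#     >>> lowercase_(TreeNode(0, TreeNode(0), TreeNode(3)))
#     3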
| 25 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_dpt'''] = ['''DPTFeatureExtractor''']
    _import_structure['''image_processing_dpt'''] = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_dpt'''] = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImgaImgPipelineFastTests( OnnxPipelineTesterMixin , unittest.TestCase ):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs( self , seed=0 ):
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = np.random.RandomState(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 3,
            """strength""": 0.75,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs
    def test_pipeline_default( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
        assert np.abs(image_slice - expected_slice ).max() < 1e-1
    def test_pipeline_pndm( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_lms( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs() )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler_ancestral( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_dpm_multistep( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImgaImgPipelineIntegrationTests( unittest.TestCase ):
    @property
    def gpu_provider( self ):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self ):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        init_image = init_image.resize((768, 512) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = """A fantasy landscape, trending on artstation"""
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="""np""" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def test_inference_k_lms( self ):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        init_image = init_image.resize((768, 512) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = """A fantasy landscape, trending on artstation"""
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type="""np""" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 26 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase_ = {
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase_ = {
'''unc-nlp/lxmert-base-uncased''': 5_12,
}
lowerCAmelCase_ = {
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class LxmertTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files ) | 8 | 0 |
'''simple docstring'''
def lowerCamelCase (number : int ):
if number < 0:
raise ValueError('number must not be negative' )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
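# Quick sanity check for the bit trick above: a power of two has exactly one
# set bit, so n & (n - 1) clears it to zero. Note that 0 also passes this
# check, so callers that must exclude 0 should test for it separately.
#
#     >>> [n for n in range(1, 20) if lowerCamelCase(n)]
#     [1, 2, 4, 8, 16]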
| 27 |
import math
def is_prime(number ):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth = 10001 ):
    try:
        nth = int(nth )
    except (TypeError, ValueError):
        raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
    if nth <= 0:
        raise ValueError('''Parameter nth must be greater than or equal to one.''' )
    primes = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
        num += 1
    return primes[len(primes ) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""") | 8 | 0 |
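# The 6k +/- 1 trial division above relies on every prime p > 3 satisfying
# p % 6 in (1, 5), so stepping i by 6 and testing i and i + 2 covers all
# candidate factors up to sqrt(number).
#
#     >>> [n for n in range(2, 30) if is_prime(n)]
#     [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]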
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 |
from sklearn.metrics import mean_squared_error
import datasets
lowerCAmelCase_ = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
lowerCAmelCase_ = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
lowerCAmelCase_ = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
    def _get_feature_types( self ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
    def _compute( self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ):
        mse = mean_squared_error(
            references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
return {"mse": mse} | 8 | 0 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def lowercase__ ( ):
'''simple docstring'''
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    result = kruskal(num_nodes , edges )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(expected ) == sorted(result )
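# The assertion above compares edge sets order-insensitively. As a further
# sanity check, any valid minimum spanning tree of this graph has total
# weight 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37:
#
#     >>> sum(w for _, _, w in [[7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9]])
#     37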
| 29 |
def permute(nums ):
    result = []
    if len(nums ) == 1:
        return [nums.copy()]
    for _ in range(len(nums ) ):
        n = nums.pop(0 )
        permutations = permute(nums )
        for perm in permutations:
            perm.append(n )
        result.extend(permutations )
        nums.append(n )
    return result
def permutea(nums ):
    def backtrack(start ):
        if start == len(nums ) - 1:
            output.append(nums[:] )
        else:
            for i in range(start , len(nums ) ):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1 )
                nums[i], nums[start] = nums[start], nums[i]  # backtrack
    output = []
    backtrack(0 )
    return output
if __name__ == "__main__":
    import doctest
    # use res to print the data in permute2 function
    res = permutea([1, 2, 3])
    print(res)
    doctest.testmod() | 8 | 0 |
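# Both routines above enumerate all n! orderings of the input; a quick
# cross-check (a sketch using the two functions defined above):
#
#     >>> sorted(permute([1, 2, 3])) == sorted(permutea([1, 2, 3]))
#     True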
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def a ( args ):
    '''simple docstring'''
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class DownloadCommand( BaseTransformersCLICommand ):
"""simple docstring"""
@staticmethod
    def register_subcommand( parser: ArgumentParser ) -> None:
        download_parser = parser.add_parser('''download''' )
download_parser.add_argument(
'''--cache-dir''' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help='''Path to location to store the models''' )
download_parser.add_argument(
'''--force''' , action='''store_true''' , help='''Force the model to be download even if already in cache-dir''' )
download_parser.add_argument(
'''--trust-remote-code''' , action='''store_true''' , help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''' , )
download_parser.add_argument('''model''' , type=SCREAMING_SNAKE_CASE_ , help='''Name of the model to download''' )
        download_parser.set_defaults(func=a )
    def __init__( self , model: str , cache: str , force: bool , trust_remote_code: bool ) -> None:
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run( self ) -> None:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 30 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm'''] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm_fast'''] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xglm'''] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xglm'''] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xglm'''] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase_ ( stress : float , tangential_force : float , area : float , ) -> tuple[str, float]:
"""simple docstring"""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
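# Worked example for the solver above: passing stress=0 asks for the stress
# given the other two quantities, tau = F / A = 100 N / 2 m^2 = 50 Pa.
#
#     >>> UpperCamelCase_(stress=0, tangential_force=100, area=2)
#     ('stress', 50.0)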
| 31 |
from ..utils import DummyObject, requires_backends
class snake_case_ ( metaclass=DummyObject ):
    '''simple docstring'''

    _backends = ["note_seq"]
def __init__( self : Optional[int] , *_UpperCamelCase : str , **_UpperCamelCase : Optional[int] ) ->Any:
requires_backends(self , ['''note_seq'''] )
@classmethod
def snake_case__( cls : int , *_UpperCamelCase : Any , **_UpperCamelCase : List[Any] ) ->int:
requires_backends(cls , ['''note_seq'''] )
@classmethod
def snake_case__( cls : Dict , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Union[str, Any] ) ->List[str]:
requires_backends(cls , ['''note_seq'''] ) | 8 | 0 |
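# Behavior sketch for the placeholder above (assuming transformers' standard
# DummyObject metaclass): any attempt to instantiate the class or call one of
# its classmethods raises an ImportError telling the user to install the
# missing `note_seq` backend, instead of failing at import time.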
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {'vocab_file': 'vocab.txt'}
UpperCAmelCase_ : Optional[int] = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
UpperCAmelCase_ : Tuple = {
'openbmb/cpm-ant-10b': 1024,
}
def load_vocab( vocab_file ):
    """simple docstring"""
    vocab = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip('\n' )
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object ):
    def __init__( self , vocab , unk_token="<unk>" , max_input_chars_per_word=200 ) -> None:
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize( self , token ):
        chars = list(token )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            while start < end:
                substr = ''.join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
class CpmAntTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    add_prefix_space = False
    def __init__( self , vocab_file , bod_token="<d>" , eod_token="</d>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , line_token="</n>" , space_token="</_>" , padding_side="left" , **kwargs , ) -> None:
        requires_backends(self , ['jieba'] )
        super().__init__(
            bod_token=bod_token , eod_token=eod_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , unk_token=unk_token , line_token=line_token , space_token=space_token , padding_side=padding_side , **kwargs , )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file )
        self.encoder[' '] = self.encoder[space_token]
        self.encoder['\n'] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
    @property
    def bod_token_id( self ):
        return self.encoder[self.bod_token]
    @property
    def eod_token_id( self ):
        return self.encoder[self.eod_token]
    @property
    def newline_id( self ):
        return self.encoder["\n"]
    @property
    def vocab_size( self ) -> int:
        return len(self.encoder )
    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        output_tokens = []
        for x in jieba.cut(text , cut_all=False ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
        return output_tokens
    def _decode( self , token_ids , **kwargs ):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids , **kwargs )
    def check( self , token ):
        return token in self.encoder
    def convert_tokens_to_string( self , tokens ):
        return "".join(tokens )
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index , self.unk_token )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        else:
            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder['</_>'] = self.encoder[' ']
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder['</n>'] = self.encoder['\n']
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        with open(vocab_file , 'w' , encoding='utf-8' ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(token + '\n' )
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: List[int] = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 ))
        return [1] + ([0] * len(token_ids_0 ))
| 32 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = "vit_msn"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-06 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias | 8 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    def __init__( self , parent , batch_size=2 , num_channels=3 , image_size=4 , patch_size=2 , text_seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=36 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , coordinate_size=6 , shape_size=6 , num_labels=3 , num_choices=4 , scope=None , range_bbox=1000 , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox )
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask ):
        model = TFLayoutLMvaModel(config=config )
        # text + image
        result = model(input_ids , pixel_values=pixel_values , training=False )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , training=False , )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        result = model(input_ids , training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        result = model({'''pixel_values''': pixel_values} , training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , training=False , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''pixel_values''': pixel_values,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip( self , pipeline_test_case_name , config_class , model_architecture , tokenizer_name , processor_name ):
        return True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(v , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                inputs_dict['''labels'''] = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                inputs_dict['''start_positions'''] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
                inputs_dict['''end_positions'''] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ):
                inputs_dict['''labels'''] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ):
                inputs_dict['''labels'''] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )
        return inputs_dict
    def setUp( self ):
        self.model_tester = TFLayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_loss_computation( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            if getattr(model , '''hf_compute_loss''' , None ):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=True )[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                input_ids = prepared_for_class.pop('''input_ids''' )
                loss = model(input_ids , **prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                input_ids = prepared_for_class.pop('''input_ids''' )
                if "labels" in prepared_for_class:
                    labels = prepared_for_class['''labels'''].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class['''labels'''] = tf.convert_to_tensor(labels )
                        loss = model(input_ids , **prepared_for_class )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                loss = model(prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call ).parameters
                signature_names = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: '''input_ids'''}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key )
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input )
                # Send to model
                loss = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def test_model( self ):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
    def test_model_various_embeddings( self ):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
    def test_for_sequence_classification( self ):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
    def test_for_token_classification( self ):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels )
    def test_for_question_answering( self ):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
    @slow
    def test_inference_no_head( self ):
        model = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='''tf''' ).pixel_values
        input_ids = tf.constant([[1, 2]] )
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
| 33 |
from __future__ import annotations
from math import pi, sqrt
def __SCREAMING_SNAKE_CASE (inductance , capacitance ):
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 0 |
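# Worked example for the formula above, f = 1 / (2 * pi * sqrt(L * C)):
# with L = 10 mH and C = 100 nF the circuit resonates at roughly 5.03 kHz,
# since 1 / (2 * pi * sqrt(1e-9)) ~= 5032.9 Hz.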
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( ProcessorMixin ):
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """CLIPImageProcessor"""
    tokenizer_class = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
| 34 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x ):
    return x + 2
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
    def test_evaluate_assign( self : Optional[Any] ) ->int:
        code = '''x = 3'''
        state = {}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {'''x''': 3} )
        code = '''x = y'''
        state = {'''y''': 5}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {'''x''': 5, '''y''': 5} )
    def test_evaluate_call( self : Dict ) ->Optional[int]:
        code = '''y = add_two(x)'''
        state = {'''x''': 3}
        result = evaluate(code , {'''add_two''': add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {'''x''': 3, '''y''': 5} )
        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code , {} , state=state )
        assert result is None
        assert "tried to execute add_two" in out.out
    def test_evaluate_constant( self : Union[str, Any] ) ->Dict:
        code = '''x = 3'''
        state = {}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {'''x''': 3} )
    def test_evaluate_dict( self : Optional[int] ) ->Optional[int]:
        code = '''test_dict = {\'x\': x, \'y\': add_two(x)}'''
        state = {'''x''': 3}
        result = evaluate(code , {'''add_two''': add_two} , state=state )
        self.assertDictEqual(result , {'''x''': 3, '''y''': 5} )
        self.assertDictEqual(state , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
    def test_evaluate_expression( self : Dict ) ->str:
        code = '''x = 3\ny = 5'''
        state = {}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {'''x''': 3, '''y''': 5} )
    def test_evaluate_f_string( self : str ) ->Tuple:
        code = '''text = f\'This is x: {x}.\''''
        state = {'''x''': 3}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state , {'''x''': 3, '''text''': '''This is x: 3.'''} )
    def test_evaluate_if( self : Optional[Any] ) ->List[str]:
        code = '''if x <= 3:\n y = 2\nelse:\n y = 5'''
        state = {'''x''': 3}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state , {'''x''': 3, '''y''': 2} )
        state = {'''x''': 8}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {'''x''': 8, '''y''': 5} )
    def test_evaluate_list( self : str ) ->str:
        code = '''test_list = [x, add_two(x)]'''
        state = {'''x''': 3}
        result = evaluate(code , {'''add_two''': add_two} , state=state )
        self.assertListEqual(result , [3, 5] )
        self.assertDictEqual(state , {'''x''': 3, '''test_list''': [3, 5]} )
    def test_evaluate_name( self : Any ) ->List[Any]:
        code = '''y = x'''
        state = {'''x''': 3}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {'''x''': 3, '''y''': 3} )
    def test_evaluate_subscript( self : Optional[int] ) ->Dict:
        code = '''test_list = [x, add_two(x)]\ntest_list[1]'''
        state = {'''x''': 3}
        result = evaluate(code , {'''add_two''': add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {'''x''': 3, '''test_list''': [3, 5]} )
        code = '''test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'''
        state = {'''x''': 3}
        result = evaluate(code , {'''add_two''': add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
    def test_evaluate_for( self : Optional[Any] ) ->int:
        code = '''x = 0\nfor i in range(3):\n x = i'''
        state = {}
        result = evaluate(code , {'''range''': range} , state=state )
        assert result == 2
        self.assertDictEqual(state , {'''x''': 2, '''i''': 2} ) | 8 | 0 |
'''simple docstring'''
from collections.abc import Sequence
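# Kadane's algorithm: one pass over the array, tracking the best sum of a
# subarray that ends at the current element.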
def max_subarray_sum( arr , allow_empty_subarrays = False ) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("""-inf""" )
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
        max_sum = max(max_sum , curr_sum )
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(F"{max_subarray_sum(nums) = }")
| 35 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
    def get_file_format( self : Any , seed : Any , shape : Tuple ) ->List[Any]:
        return f'''gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy'''
    def tearDown( self : Any ) ->List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
    def get_latents( self : int , seed : Union[str, Any]=0 , shape : int=(4, 4, 6_4, 6_4) , fpaa : Optional[int]=False ) ->Tuple:
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return image
    def get_unet_model( self : List[Any] , fpaa : Optional[Any]=False , model_id : Optional[int]="CompVis/stable-diffusion-v1-4" ) ->Optional[Any]:
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        revision = '''bf16''' if fpaa else None
        model, params = FlaxUNetaDConditionModel.from_pretrained(
            model_id , subfolder='''unet''' , dtype=dtype , revision=revision )
        return model, params
    def get_encoder_hidden_states( self : Dict , seed : List[Any]=0 , shape : Tuple=(4, 7_7, 7_6_8) , fpaa : List[Any]=False ) ->int:
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[1_7, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_0_0_0, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16( self : Optional[Any] , seed : List[Any] , timestep : List[str] , expected_slice : Optional[int] ) ->Union[str, Any]:
        model, params = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=True )
        latents = self.get_latents(seed , fpaa=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , fpaa=True )
        sample = model.apply(
            {'''params''': params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[1_7, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_0_0_0, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16( self : Optional[int] , seed : Optional[Any] , timestep : Union[str, Any] , expected_slice : str ) ->Dict:
        model, params = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=True )
        latents = self.get_latents(seed , shape=(4, 4, 9_6, 9_6) , fpaa=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , shape=(4, 7_7, 1_0_2_4) , fpaa=True )
        sample = model.apply(
            {'''params''': params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 ) | 8 | 0 |
import numpy as np
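# Hyperbolic tangent via the identity tanh(x) = 2 / (1 + e^(-2x)) - 1.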
def A ( vector ):
'''simple docstring'''
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory (*objects ):
    if not isinstance(objects , list ):
        objects = list(objects )
    for i in range(len(objects ) ):
        objects[i] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def should_reduce_batch_size (exception ):
    _statements = [
'''CUDA out of memory.''', # CUDA OOM
'''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU
'''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM
]
    if isinstance(exception , RuntimeError ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
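# Decorator that retries `function`, halving the batch size after every OOM-style
# failure until the call succeeds or the batch size reaches zero.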
def find_executable_batch_size (function = None , starting_batch_size = 128 ):
    if function is None:
        return functools.partial(find_executable_batch_size , starting_batch_size=starting_batch_size )
    batch_size = starting_batch_size
    def decorator(*args , **kwargs ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
        params = list(inspect.signature(function ).parameters.keys() )
# Guard against user error
        if len(params ) < (len(args ) + 1):
            arg_str = ''', '''.join([F'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
F'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
F'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError('''No executable batch size found, reached zero.''' )
try:
                return function(batch_size , *args , **kwargs )
except Exception as e:
                if should_reduce_batch_size(e ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator | 8 | 0 |
'''simple docstring'''
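# Lucas-Lehmer primality test for Mersenne numbers: M_p = 2^p - 1 is prime iff
# s_{p-2} == 0 (mod M_p), where s_0 = 4 and s_k = s_{k-1}^2 - 2.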
def lucas_lehmer_test( p ):
    """simple docstring"""
    if p < 2:
        raise ValueError("""p should not be less than 2!""" )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 37 |
from __future__ import annotations
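# A1Z26 cipher: each lowercase letter maps to its position in the alphabet (a=1, ..., z=26).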
def encode (plain ):
    return [ord(elem ) - 96 for elem in plain]
def decode (encoded ):
    return "".join(chr(elem + 96 ) for elem in encoded )
def main ():
    snake_case_ = encode(input('''-> ''' ).strip().lower() )
    print('''Encoded: ''' , snake_case_ )
    print('''Decoded:''' , decode(snake_case_ ) )
if __name__ == "__main__":
main() | 8 | 0 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    @slow
    def test_bert_from_pretrained( self : Any ):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name ):
                config = AutoConfig.from_pretrained(model_name )
                self.assertIsNotNone(config )
                self.assertIsInstance(config , BertConfig )
                model = FlaxAutoModel.from_pretrained(model_name )
                self.assertIsNotNone(model )
                self.assertIsInstance(model , FlaxBertModel )
    @slow
    def test_roberta_from_pretrained( self : Optional[int] ):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name ):
                config = AutoConfig.from_pretrained(model_name )
                self.assertIsNotNone(config )
                self.assertIsInstance(config , RobertaConfig )
                model = FlaxAutoModel.from_pretrained(model_name )
                self.assertIsNotNone(model )
                self.assertIsInstance(model , FlaxRobertaModel )
    @slow
    def test_bert_jax_jit( self : int ):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            model = FlaxBertModel.from_pretrained(model_name )
            tokens = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )
            @jax.jit
            def eval(**kwargs : int ):
                return model(**kwargs )
            eval(**tokens ).block_until_ready()
    @slow
    def test_roberta_jax_jit( self : Union[str, Any] ):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            model = FlaxRobertaModel.from_pretrained(model_name )
            tokens = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )
            @jax.jit
            def eval(**kwargs : Any ):
                return model(**kwargs )
            eval(**tokens ).block_until_ready()
    def test_repo_not_found( self : Any ):
        with self.assertRaisesRegex(
            EnvironmentError , """bert-base is not a local folder and is not a valid model identifier""" ):
            model = FlaxAutoModel.from_pretrained("""bert-base""" )
    def test_revision_not_found( self : Union[str, Any] ):
        with self.assertRaisesRegex(
            EnvironmentError , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            model = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision="""aaaaaa""" )
    def test_model_file_not_found( self : Any ):
        with self.assertRaisesRegex(
            EnvironmentError , """hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack""" , ):
            model = FlaxAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
    def test_model_from_pt_suggestion( self : List[Any] ):
        with self.assertRaisesRegex(EnvironmentError , """Use `from_pt=True` to load this model""" ):
            model = FlaxAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
| 38 |
import math
def res (x , y ):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError('''This should never happen''' )
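# Example: to compare 2^10 with 10^2, compare res(2, 10) = 10*log10(2) ~ 3.01
# against res(10, 2) = 2*log10(10) = 2, so 2^10 is the larger number.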
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
    prompt = '''Enter the base and the power separated by a comma: '''
    xa , ya = map(int, input(prompt).split(''','''))
    xb , yb = map(int, input(prompt).split(''','''))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)
    # We check for the largest number
    if resa > resb:
        print('''Largest number is''', xa, '''^''', ya)
    elif resb > resa:
        print('''Largest number is''', xb, '''^''', yb)
else:
print('''Both are equal''') | 8 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        mask_image = Image.fromarray(np.uint8(image + 4 ) ).convert('RGB' ).resize((64, 64) )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': init_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_inpaint( self ):
        """simple docstring"""
        device = 'cpu' # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_single_identical( self ):
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
    def tearDown( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline( self ):
        """simple docstring"""
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench.npy' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16( self ):
        """simple docstring"""
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading( self ):
        """simple docstring"""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        scheduler = PNDMScheduler.from_pretrained(model_id , subfolder='scheduler' )
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , safety_checker=None , scheduler=scheduler , torch_dtype=torch.float16 , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , num_inference_steps=2 , output_type='np' , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 39 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'''vocab_file''': '''spiece.model'''}
lowerCAmelCase_ = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
lowerCAmelCase_ = {
'''google/bigbird-roberta-base''': 40_96,
'''google/bigbird-roberta-large''': 40_96,
'''google/bigbird-base-trivia-itc''': 40_96,
}
class snake_case_ ( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__( self : List[str] , vocab_file : List[str] , unk_token : Dict="<unk>" , bos_token : List[str]="<s>" , eos_token : Tuple="</s>" , pad_token : Any="<pad>" , sep_token : Any="[SEP]" , mask_token : Optional[Any]="[MASK]" , cls_token : Any="[CLS]" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : Dict , ) ->None:
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sep_token=sep_token , mask_token=mask_token , cls_token=cls_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@property
    def vocab_size( self : str ) ->List[Any]:
return self.sp_model.get_piece_size()
    def get_vocab( self : int ) ->Union[str, Any]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
def __getstate__( self : Tuple ) ->Any:
        state = self.__dict__.copy()
        state['''sp_model'''] = None
return state
    def __setstate__( self : str , d : List[Any] ) ->List[str]:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def _tokenize( self : Optional[int] , text : str ) ->List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self : str , token : List[str] ) ->Tuple:
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self : Union[str, Any] , index : str ) ->List[Any]:
        token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self : Dict , tokens : Optional[int] ) ->List[str]:
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def _decode( self : int , token_ids : List[int] , skip_special_tokens : bool = False , clean_up_tokenization_spaces : bool = None , spaces_between_special_tokens : bool = True , **kwargs : List[str] , ) ->str:
        self._decode_use_source_tokenizer = kwargs.pop('''use_source_tokenizer''' , False )
        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
                    current_sub_text = []
                sub_texts.append(token )
            else:
                current_sub_text.append(token )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(R''' (\[(MASK|SEP)\])''' , R'''\1''' , ''' '''.join(sub_texts ) )
        else:
            text = ''''''.join(sub_texts )
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text )
            return clean_text
        else:
            return text
    def save_vocabulary( self : List[Any] , save_directory : str , filename_prefix : Optional[str] = None ) ->Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self : Tuple , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) ->List[int]:
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + token_ids_b + sep
    def get_special_tokens_mask( self : List[str] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ) ->List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=already_has_special_tokens )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
    def create_token_type_ids_from_sequences( self : List[Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) ->List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1] | 8 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class _A ( PretrainedConfig ):
"""simple docstring"""
    model_type = """audio-spectrogram-transformer"""
    def __init__( self : List[Any] , hidden_size : str=768 , num_hidden_layers : Dict=12 , num_attention_heads : Optional[int]=12 , intermediate_size : Any=3072 , hidden_act : List[Any]="gelu" , hidden_dropout_prob : Tuple=0.0 , attention_probs_dropout_prob : Any=0.0 , initializer_range : List[str]=0.02 , layer_norm_eps : int=1e-12 , patch_size : str=16 , qkv_bias : Optional[Any]=True , frequency_stride : Any=10 , time_stride : List[Any]=10 , max_length : List[str]=1024 , num_mel_bins : str=128 , **kwargs : Dict , ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 40 |
from __future__ import annotations
from collections.abc import Generator
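# Incremental sieve of Eratosthenes: `factor_map` holds, for each upcoming
# composite, one prime factor that will strike it out when the scan reaches it.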
def sieve ():
    factor_map = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime , None )
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution (limit = 1E10 ):
    primes = sieve()
    n = 1
    while True:
        prime = next(primes )
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes )
        n += 2
if __name__ == "__main__":
print(solution()) | 8 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_A : List[Any] =logging.get_logger(__name__)
_A : Tuple ={
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
_A : List[Any] ={
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
_A : List[Any] ={'''facebook/blenderbot-3B''': 128}
class _lowercase ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__( self: Union[str, Any] , vocab_file: Union[str, Any]=None , merges_file: List[str]=None , tokenizer_file: int=None , errors: Dict="replace" , bos_token: Any="<s>" , eos_token: Dict="</s>" , sep_token: Any="</s>" , cls_token: Union[str, Any]="<s>" , unk_token: Tuple="<unk>" , pad_token: Union[str, Any]="<pad>" , mask_token: Optional[Any]="<mask>" , add_prefix_space: Tuple=False , trim_offsets: str=True , **kwargs: Optional[Any] , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = """post_processor"""
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["""sep"""] = tuple(state["""sep"""] )
            if "cls" in state:
                state["""cls"""] = tuple(state["""cls"""] )
            changes_to_apply = False
            if state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
                state["""add_prefix_space"""] = add_prefix_space
                changes_to_apply = True
            if state.get("""trim_offsets""" , trim_offsets ) != trim_offsets:
                state["""trim_offsets"""] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("""type""" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token( self: str ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token( self: Optional[Any] , value: str ):
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self: Union[str, Any] , *args: Optional[Any] , **kwargs: Any ):
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self: Optional[int] , *args: List[Any] , **kwargs: int ):
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self: Union[str, Any] , save_directory: str , filename_prefix: Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def create_token_type_ids_from_sequences( self: Optional[Any] , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def build_inputs_with_special_tokens( self: Tuple , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ):
        return token_ids_a + [self.eos_token_id]
    def _build_conversation_input_ids( self: Any , conversation: "Conversation" ):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(""" """ + text )
            else:
                # Generated responses should contain them already.
                inputs.append(text )
        full_string = """ """.join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
        return input_ids
| 41 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
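# Lazy import structure: heavy submodules are only imported on first attribute
# access through the `_LazyModule` instance created at the bottom of this file.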
_import_structure = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_opt'''] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_opt'''] = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_opt'''] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 8 | 0 |
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def parse_args ( ) -> List[Any]:
    parser = argparse.ArgumentParser()
parser.add_argument(
'-m' , '--pretrained_model_name_or_path' , type=__A , default=__A , required=__A , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
parser.add_argument(
'-c' , '--caption' , type=__A , default='robotic cat with wings' , help='Text used to generate images.' , )
parser.add_argument(
'-n' , '--images_num' , type=__A , default=4 , help='How much images to generate.' , )
parser.add_argument(
'-s' , '--seed' , type=__A , default=42 , help='Seed for random process.' , )
parser.add_argument(
'-ci' , '--cuda_id' , type=__A , default=0 , help='cuda_id.' , )
    args = parser.parse_args()
    return args
def image_grid ( imgs , rows , cols ) -> Dict:
    if not len(imgs ) == rows * cols:
        raise ValueError('The specified number of rows and columns are not correct.' )
    w , h = imgs[0].size
    grid = Image.new('RGB' , size=(cols * w, rows * h) )
    grid_w , grid_h = grid.size
    for i, img in enumerate(imgs ):
        grid.paste(img , box=(i % cols * w, i // cols * h) )
    return grid
def generate_images ( pipeline , prompt="robotic cat with wings" , guidance_scale=7.5 , num_inference_steps=50 , num_images_per_prompt=1 , seed=42 , ) -> Dict:
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt , guidance_scale=guidance_scale , num_inference_steps=num_inference_steps , generator=generator , num_images_per_prompt=num_images_per_prompt , ).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows )
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid , images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 42 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class snake_case_ ( PipelineTool ):
'''simple docstring'''
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    inputs = ["text"]
    outputs = ["text"]
    def encode( self : str , text : int ) ->Optional[int]:
        return self.pre_processor(text , return_tensors='''pt''' , truncation=True )
    def forward( self : Tuple , inputs : Optional[int] ) ->Tuple:
        return self.model.generate(**inputs )[0]
    def decode( self : Optional[Any] , outputs : Optional[int] ) ->Any:
        return self.pre_processor.decode(outputs , skip_special_tokens=True , clean_up_tokenization_spaces=True ) | 8 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class lowerCamelCase_ ( PretrainedConfig ):
'''simple docstring'''
    model_type = """wav2vec2"""
def __init__( self , __lowercase=32 , __lowercase=768 , __lowercase=12 , __lowercase=12 , __lowercase=3_072 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.02 , __lowercase=1E-5 , __lowercase="group" , __lowercase="gelu" , __lowercase=(512, 512, 512, 512, 512, 512, 512) , __lowercase=(5, 2, 2, 2, 2, 2, 2) , __lowercase=(10, 3, 3, 3, 3, 2, 2) , __lowercase=False , __lowercase=128 , __lowercase=16 , __lowercase=False , __lowercase=True , __lowercase=0.05 , __lowercase=10 , __lowercase=2 , __lowercase=0.0 , __lowercase=10 , __lowercase=0 , __lowercase=320 , __lowercase=2 , __lowercase=0.1 , __lowercase=100 , __lowercase=256 , __lowercase=256 , __lowercase=0.1 , __lowercase="sum" , __lowercase=False , __lowercase=False , __lowercase=256 , __lowercase=(512, 512, 512, 512, 1_500) , __lowercase=(5, 3, 3, 1, 1) , __lowercase=(1, 2, 3, 1, 1) , __lowercase=512 , __lowercase=0 , __lowercase=1 , __lowercase=2 , __lowercase=False , __lowercase=3 , __lowercase=2 , __lowercase=3 , __lowercase=None , __lowercase=None , **__lowercase , ) -> int:
super().__init__(**__lowercase , pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase)
__UpperCamelCase :Any = hidden_size
__UpperCamelCase :int = feat_extract_norm
__UpperCamelCase :Tuple = feat_extract_activation
__UpperCamelCase :Union[str, Any] = list(__lowercase)
__UpperCamelCase :List[Any] = list(__lowercase)
__UpperCamelCase :int = list(__lowercase)
__UpperCamelCase :List[Any] = conv_bias
__UpperCamelCase :Optional[int] = num_conv_pos_embeddings
__UpperCamelCase :Dict = num_conv_pos_embedding_groups
__UpperCamelCase :Any = len(self.conv_dim)
__UpperCamelCase :List[str] = num_hidden_layers
__UpperCamelCase :int = intermediate_size
__UpperCamelCase :str = hidden_act
__UpperCamelCase :Any = num_attention_heads
__UpperCamelCase :int = hidden_dropout
__UpperCamelCase :Tuple = attention_dropout
__UpperCamelCase :List[str] = activation_dropout
__UpperCamelCase :Optional[Any] = feat_proj_dropout
__UpperCamelCase :Any = final_dropout
__UpperCamelCase :Any = layerdrop
__UpperCamelCase :str = layer_norm_eps
__UpperCamelCase :Optional[Any] = initializer_range
__UpperCamelCase :List[str] = vocab_size
__UpperCamelCase :str = do_stable_layer_norm
__UpperCamelCase :Union[str, Any] = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCamelCase :List[Any] = apply_spec_augment
__UpperCamelCase :Tuple = mask_time_prob
__UpperCamelCase :int = mask_time_length
__UpperCamelCase :Dict = mask_time_min_masks
__UpperCamelCase :str = mask_feature_prob
__UpperCamelCase :List[str] = mask_feature_length
__UpperCamelCase :Union[str, Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__UpperCamelCase :Optional[Any] = num_codevectors_per_group
__UpperCamelCase :List[Any] = num_codevector_groups
__UpperCamelCase :Tuple = contrastive_logits_temperature
__UpperCamelCase :Optional[int] = feat_quantizer_dropout
__UpperCamelCase :Optional[int] = num_negatives
__UpperCamelCase :List[Any] = codevector_dim
__UpperCamelCase :str = proj_codevector_dim
__UpperCamelCase :List[str] = diversity_loss_weight
# ctc loss
__UpperCamelCase :Tuple = ctc_loss_reduction
__UpperCamelCase :Tuple = ctc_zero_infinity
# adapter
__UpperCamelCase :List[str] = add_adapter
__UpperCamelCase :Tuple = adapter_kernel_size
__UpperCamelCase :str = adapter_stride
__UpperCamelCase :Tuple = num_adapter_layers
__UpperCamelCase :Tuple = output_hidden_size or hidden_size
__UpperCamelCase :Optional[Any] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__UpperCamelCase :Optional[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__UpperCamelCase :Optional[int] = list(__lowercase)
__UpperCamelCase :List[Any] = list(__lowercase)
__UpperCamelCase :List[Any] = list(__lowercase)
__UpperCamelCase :str = xvector_output_dim
@property
def UpperCamelCase__ ( self) -> List[str]:
return functools.reduce(operator.mul , self.conv_stride , 1)
| 43 |
from collections import deque
from .hash_table import HashTable
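# Hash table variant that resolves collisions by chaining values in a per-slot deque.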
class snake_case_ ( HashTable ):
'''simple docstring'''
    def __init__( self : int , *args : int , **kwargs : Tuple ) ->Tuple:
        super().__init__(*args , **kwargs )
    def _set_value( self : Dict , key : List[str] , data : Dict ) ->Tuple:
        self.values[key] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data )
        self._keys[key] = self.values[key]
    def balanced_factor( self : List[Any] ) ->str:
        return (
            sum(self.charge_factor - len(slot ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )
    def _collision_resolution( self : Dict , key : Union[str, Any] , data : Optional[int]=None ) ->str:
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(None ) == 0
        ):
            return key
        return super()._collision_resolution(key , data ) | 8 | 0 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
_a : str = logging.get_logger(__name__)
_a : Union[str, Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_a : int = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizerFast( BertTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast( BertTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)

DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])


CUSTOM_DPR_READER_DOCSTRING = (
    r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. '
    r'Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
)
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
    def __call__( self , questions , titles = None , texts = None , padding = False , truncation = False , max_length = None , return_tensors = None , return_attention_mask = None , **kwargs , ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        assert len(titles ) == len(
            texts ), F"There should be as many titles as texts but got {len(titles )} titles and {len(texts )} texts."
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )["""input_ids"""]
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )["""input_ids"""]
        encoded_inputs = {
            """input_ids""": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs["""attention_mask"""] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans( self , reader_input , reader_output , num_spans = 16 , max_answer_length = 64 , num_spans_per_passage = 4 , ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["""input_ids"""]
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_docs = len(relevance_logits )
        sorted_docs = sorted(range(n_docs ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(nbest_spans_predictions ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits , end_logits , max_answer_length , top_spans , ):
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, F"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, F"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )

            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizerFast( CustomDPRReaderTokenizerMixin , BertTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
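

# Minimal usage sketch (illustrative; mirrors the transformers docs example,
# requires hub access, and the decoded spans depend on the checkpoint):
#
#   from transformers import DPRReader
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#       return_tensors="pt",
#   )
#   outputs = model(**encoded_inputs)
#   best_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)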
| 44 |
from __future__ import annotations
def solve_maze(maze ):
    size = len(maze )
    # We need to create a solution object to save the path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    solved = run_maze(maze , 0 , 0 , solutions )
    if solved:
        print('''\n'''.join(str(row ) for row in solutions ) )
    else:
        print('''No solution exists!''' )
    return solved


def run_maze(maze , i , j , solutions ):
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and blocked points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark the cell as part of the candidate path
            solutions[i][j] = 1

            # check all four directions
            if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
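

# Illustrative run (assumption: 0 marks an open cell, 1 a wall; the path must
# go from the top-left to the bottom-right corner one cell at a time):
#
#   solve_maze(
#       [
#           [0, 1, 0],
#           [0, 0, 0],
#           [1, 0, 0],
#       ]
#   )
#   # prints the 0/1 solution matrix and returns True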
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester( ConfigTester ):
    '''simple docstring'''

    def create_and_test_config_common_properties( self ):
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , '''tf_padding''' ) )
        self.parent.assertTrue(hasattr(config , '''depth_multiplier''' ) )
class MobileNetVaModelTester:
    '''simple docstring'''

    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=32 , depth_multiplier=0.25 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , tf_padding=True , hidden_act="relu6" , last_hidden_size=1_280 , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config( self ):
        return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ):
        model = MobileNetVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        self.parent.assertEqual(
            result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )

    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        result = model(pixel_values , labels=pixel_labels )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': MobileNetVaModel,
            'image-classification': MobileNetVaForImageClassification,
            'image-segmentation': MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False )

    def test_config( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        pass

    @unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' )
    def test_model_common_attributes( self ):
        pass

    @unittest.skip(reason='''MobileNetV2 does not output attentions''' )
    def test_attention_outputs( self ):
        pass

    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states ) , expected_num_stages )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict , config , model_class )

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    def test_for_semantic_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def default_image_processor( self ):
        return (
            MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head( self ):
        model = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size((1, 1_001) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905] ).to(torch_device )

        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )

    @slow
    def test_inference_semantic_segmentation( self ):
        model = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
        model = model.to(torch_device )

        image_processor = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )

        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65) )
        self.assertEqual(logits.shape , expected_shape )

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ] , device=torch_device , )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )
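
# To exercise this module directly (illustrative; the path assumes the usual
# transformers repository layout, and RUN_SLOW=1 enables the integration tests):
#   RUN_SLOW=1 pytest tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py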
| 45 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision ):
    if not isinstance(precision , int ):
        raise TypeError('''Undefined for non-integers''' )
    elif precision < 1:
        raise ValueError('''Undefined for non-natural numbers''' )

    # The Chudnovsky algorithm gains roughly 14 digits of pi per iteration.
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426880 * Decimal(10005 ).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
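

# Quick sanity check (illustrative, not part of the original script): the
# result agrees with the leading digits of math.pi, e.g.
#   pi(10) == '3.14159265'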
if __name__ == "__main__":
lowerCAmelCase_ = 50
print(f"""The first {n} digits of pi is: {pi(n)}""") | 8 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch( openai_checkpoint_folder_path : str , openai_config_file : str , pytorch_dump_folder_path : str ):
    '''simple docstring'''
    # Construct the model config (an empty config file name means the stock OpenAI GPT config)
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file )
    model = OpenAIGPTModel(config )

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model , config , openai_checkpoint_folder_path )

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + """/""" + CONFIG_NAME
    print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F'Save configuration file to {pytorch_config_dump_path}' )
    with open(pytorch_config_dump_path , """w""" , encoding="""utf-8""" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
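
# Example invocation (script name and paths are illustrative; the flags match
# the argparse definitions above):
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path ./openai-gpt-tf \
#       --pytorch_dump_folder_path ./openai-gpt-pt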
| 46 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class snake_case_ ( AbstractDatasetReader ):
    '''simple docstring'''

    def __init__( self , df : pyspark.sql.DataFrame , split : Optional[NamedSplit] = None , features : Optional[Features] = None , streaming : bool = True , cache_dir : str = None , keep_in_memory : bool = False , working_dir : str = None , load_from_cache_file : bool = True , file_format : str = "arrow" , **kwargs , ):
        super().__init__(
            split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , **kwargs , )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df , features=features , cache_dir=cache_dir , working_dir=working_dir , **kwargs , )

    def read( self ):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split ) | 8 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_thumbnail=True , do_align_axis=False , do_pad=True , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'height': 18, 'width': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp( self ):
        self.image_processor_tester = DonutImageProcessingTester(self )

    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'do_thumbnail' ) )
        self.assertTrue(hasattr(image_processing , 'do_align_long_axis' ) )
        self.assertTrue(hasattr(image_processing , 'do_pad' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )

    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
        self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )

    def test_batch_feature( self ):
        pass
    @is_flaky()
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )

    @is_flaky()
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )

    @is_flaky()
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
| 47 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_dpt'''] = ['''DPTFeatureExtractor''']
    _import_structure['''image_processing_dpt'''] = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_dpt'''] = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 8 | 0 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name , data_dir , max_source_length=1024 , max_target_length=1024 , consider_target=False , **kwargs
):
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="train" , **kwargs )
    pad = tok.pad_token_id

    def get_lens(ds ):
        dl = tqdm(
            DataLoader(ds , batch_size=512 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch["labels"].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens

    train_lens = get_lens(train_ds )
    val_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="val" , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
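
# Example invocation via python-fire (the data directory is illustrative and
# must contain the train.* / val.* files that SeqaSeqDataset expects):
#   python save_len_file.py t5-small ./data/cnn_dm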
| 48 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''unc-nlp/lxmert-base-uncased''': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class snake_case_ ( PreTrainedTokenizerFast ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def snake_case__( self : Optional[int] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) ->List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def snake_case__( self : int , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) ->List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def snake_case__( self : Any , save_directory : str , filename_prefix : Optional[str] = None ) ->Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files ) | 8 | 0 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x ):
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model( self , config , text_config ):
        pass

    def prepare_config_and_inputs( self ):
        pass

    def get_pretrained_model_and_inputs( self ):
        pass

    def assert_almost_equals( self , a : np.ndarray , b : np.ndarray , tol : float ):
        diff = np.abs((a - b)).max()
        self.assertLessEqual(diff , tol , F'Difference between torch and flax is {diff} (>= {tol}).')
    def check_model_from_pretrained_configs( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
        self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config)
        kwargs = {'''vision_model''': vision_model, '''text_model''': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
        self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config)
        kwargs = {'''vision_model''': vision_model, '''text_model''': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff , 1E-3)

    def check_vision_text_output_attention( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config)
        kwargs = {'''vision_model''': vision_model, '''text_model''': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True)

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions) , vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions) , text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def check_pt_flax_equivalence( self , pt_model , fx_model , inputs_dict ):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs) , len(pt_outputs) , '''Output lengths differ between Flax and PyTorch''')
        for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4]):
            self.assert_almost_equals(fx_output , pt_output.numpy() , 4E-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname , from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded) , len(pt_outputs) , '''Output lengths differ between Flax and PyTorch''')
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded , pt_output.numpy() , 4E-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname , from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs) , len(pt_outputs_loaded) , '''Output lengths differ between Flax and PyTorch''')
        for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output , pt_output_loaded.numpy() , 4E-2)
    def check_equivalence_pt_to_flax( self , vision_config , text_config , inputs_dict ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict)

    def check_equivalence_flax_to_pt( self , vision_config , text_config , inputs_dict ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model , fx_model.params)

        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict)
    def test_model_from_pretrained_configs( self ):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained( self ):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load( self ):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention( self ):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence( self ):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop('''vision_config''')
        text_config = config_inputs_dict.pop('''text_config''')

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config , text_config , inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config , text_config , inputs_dict)
    @slow
    def test_real_model_save_load_from_pretrained( self ):
        model_2 , inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff , 1E-5)
@require_flax
class FlaxViTBertModelTest( VisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs( self ):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}

        return model, inputs
    def get_vision_text_model( self , vision_config , text_config ):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config , pixel_values = vision_config_and_inputs

        text_config , input_ids , token_type_ids , attention_mask = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class FlaxCLIPVisionBertModelTest( VisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs( self ):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}

        return model, inputs

    def get_vision_text_model( self , vision_config , text_config ):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config , pixel_values = vision_config_and_inputs

        text_config , input_ids , token_type_ids , attention_mask = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference( self ):
        model = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''')

        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
        inputs = processor(
            text=['''una foto di un gatto''', '''una foto di un cane'''] , images=image , padding=True , return_tensors='''np''')

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )

        expected_logits = np.array([[1.2_28_47_27, 0.3_10_41_22]])

        self.assertTrue(np.allclose(outputs.logits_per_image , expected_logits , atol=1E-3))
| 49 |
import math
def is_prime(number ):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth = 10001 ):
    try:
        nth = int(nth )
    except (TypeError, ValueError):
        raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
    if nth <= 0:
        raise ValueError('''Parameter nth must be greater than or equal to one.''' )
    primes = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
            num += 1
        else:
            num += 1
    return primes[len(primes ) - 1]
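

# e.g. solution(6) == 13 (the sixth prime); the default nth of 10001 yields
# 104743, the answer to Project Euler problem 7.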
if __name__ == "__main__":
print(f"""{solution() = }""") | 8 | 0 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = """▁"""

SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class BertGenerationTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp( self ):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )

    def test_convert_token_and_id( self ):
        token = '<s>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )

    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , '<unk>' )
        self.assertEqual(vocab_keys[1] , '<s>' )
        self.assertEqual(vocab_keys[-1] , '<pad>' )
        self.assertEqual(len(vocab_keys ) , 1002 )

    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def A_ ( self : Union[str, Any] ) -> List[Any]:
lowerCamelCase__ : Union[str, Any] = BertGenerationTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
lowerCamelCase__ : List[str] = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [285, 46, 10, 170, 382] , )
lowerCamelCase__ : List[str] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCamelCase__ : Optional[int] = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowerCamelCase__ : Optional[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def A_ ( self : Dict ) -> Tuple:
return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
@slow
def A_ ( self : Optional[int] ) -> List[str]:
lowerCamelCase__ : Union[str, Any] = 'Hello World!'
lowerCamelCase__ : Dict = [18536, 2260, 101]
self.assertListEqual(UpperCAmelCase , self.big_tokenizer.encode(UpperCAmelCase ) )
@slow
def A_ ( self : Optional[Any] ) -> str:
lowerCamelCase__ : List[Any] = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
lowerCamelCase__ : Any = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(UpperCAmelCase , self.big_tokenizer.encode(UpperCAmelCase ) )
@require_torch
@slow
def A_ ( self : int ) -> Optional[Any]:
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
lowerCamelCase__ : str = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowerCamelCase__ : int = ' '.join(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = self.big_tokenizer.encode_plus(UpperCAmelCase , return_tensors='pt' , return_token_type_ids=UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=UpperCAmelCase )
lowerCamelCase__ : Tuple = BertGenerationConfig()
lowerCamelCase__ : Optional[Any] = BertGenerationEncoder(UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCAmelCase )
model(**UpperCAmelCase )
@slow
def A_ ( self : Optional[int] ) -> List[Any]:
# fmt: off
lowerCamelCase__ : Any = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
| 50 |
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse} | 8 | 0 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 51 |
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of ``nums`` by rotating the list recursively."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """Return all permutations of ``nums`` with in-place backtracking."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data produced by the permute2 function
    res = permute2([1, 2, 3])
    print(res)
doctest.testmod() | 8 | 0 |
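For reference, both strategies above enumerate the same 3! = 6 permutations, just in different orders; a quick check against the fixed helpers:

# permute() and permute2() agree up to ordering.
assert sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3]))
print(permute2([1, 2, 3]))
# [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]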
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
| 52 |
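The per-pixel loops above are correct but slow; NumPy broadcasting computes the same negative in one vectorized step (a sketch, not part of the original row):

import numpy as np

# Equivalent one-liner: subtracting every pixel from 255 inverts all
# channels at once, with no Python-level loops.
negative = 255 - img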
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) | 8 | 0 |
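With the lazy module registered in `sys.modules`, importing the package stays cheap: a submodule is only materialized when one of its symbols is first accessed. A sketch of the user-facing behaviour:

# Loads only configuration_xglm; the heavy modeling files stay untouched
# until one of their classes is actually requested.
from transformers import XGLMConfig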
'''simple docstring'''
def sylvester(number: int) -> int:
    """Return the ``number``-th term (1-indexed) of Sylvester's sequence."""
    assert isinstance(number, int), f'''The input value of [n={number}] is not an integer'''

    if number == 1:
        return 2
    elif number < 1:
        msg = f'''The input value of [n={number}] has to be > 0'''
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(f'The 8th number in Sylvester\'s sequence: {sylvester(8)}')
| 53 |
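The recurrence above is a(n) = a(n-1) * (a(n-1) - 1) + 1 with a(1) = 2, so the first terms are 2, 3, 7, 43, 1807; a quick check against the fixed helper:

assert [sylvester(n) for n in range(1, 6)] == [2, 3, 7, 43, 1807]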
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"]) | 8 | 0 |
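A hedged sketch of how such a dummy behaves when the optional backend is missing: `requires_backends` raises an ImportError whose message names the absent package, so the failure happens at instantiation time rather than at import time.

try:
    MidiProcessor()  # backend "note_seq" not installed
except ImportError as err:
    print(err)  # message tells the user to install the note-seq package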
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : Tuple = logging.get_logger(__name__)
a__ : Optional[int] = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
snake_case__ : Optional[int] = "xlm-roberta-xl"
def __init__( self : Union[str, Any] , UpperCAmelCase__ : int=2_5_0_8_8_0 , UpperCAmelCase__ : Optional[Any]=2_5_6_0 , UpperCAmelCase__ : Optional[Any]=3_6 , UpperCAmelCase__ : str=3_2 , UpperCAmelCase__ : Optional[Any]=1_0_2_4_0 , UpperCAmelCase__ : List[Any]="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Dict=5_1_4 , UpperCAmelCase__ : Optional[int]=1 , UpperCAmelCase__ : Dict=0.02 , UpperCAmelCase__ : Tuple=1E-05 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : str=0 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Tuple="absolute" , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Any=None , **UpperCAmelCase__ : Tuple , ) -> Optional[int]:
super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = position_embedding_type
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = classifier_dropout
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__SCREAMING_SNAKE_CASE = {0: "batch", 1: "choice", 2: "sequence"}
else:
__SCREAMING_SNAKE_CASE = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 54 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
                 hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
                 initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3,
                 qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias | 8 | 0 |
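A minimal usage sketch for the config above; the defaults mirror the base ViT-MSN architecture, so constructing it with no arguments reproduces them:

config = ViTMSNConfig()
print(config.hidden_size, config.num_hidden_layers, config.patch_size)  # 768 12 16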
'''simple docstring'''
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 55 |
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 0 |
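A worked example for f = 1/(2 * pi * sqrt(LC)): with L = 10 H and C = 5 F (values chosen for readability, not physical realism), sqrt(LC) = sqrt(50) is about 7.071, so f is roughly 1/44.43, i.e. about 0.0225 Hz:

label, freq = resonant_frequency(inductance=10, capacitance=5)
print(label, round(freq, 4))  # Resonant frequency 0.0225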
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
a : List[str] = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class a ( unittest.TestCase ):
@classmethod
def A_ ( cls : str ):
snake_case_ = TOKEN
HfFolder.save_token(lowercase_ )
@classmethod
def A_ ( cls : Optional[Any] ):
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def A_ ( self : Tuple ):
snake_case_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
snake_case_ = FlaxBertModel(lowercase_ )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
snake_case_ = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
snake_case_ = flatten_dict(unfreeze(model.params ) )
snake_case_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
snake_case_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase_ , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase_ , repo_id='''test-model-flax''' , push_to_hub=lowercase_ , use_auth_token=self._token )
snake_case_ = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
snake_case_ = flatten_dict(unfreeze(model.params ) )
snake_case_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
snake_case_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase_ , 1e-3 , msg=F"{key} not identical" )
def A_ ( self : Optional[Any] ):
snake_case_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
snake_case_ = FlaxBertModel(lowercase_ )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
snake_case_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
snake_case_ = flatten_dict(unfreeze(model.params ) )
snake_case_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
snake_case_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase_ , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
lowercase_ , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=lowercase_ , use_auth_token=self._token )
snake_case_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
snake_case_ = flatten_dict(unfreeze(model.params ) )
snake_case_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
snake_case_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase_ , 1e-3 , msg=F"{key} not identical" )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = True
snake_case_ = flatten_dict(modela.params )
snake_case_ = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
snake_case_ = False
return models_are_equal
@require_flax
class a ( unittest.TestCase ):
def A_ ( self : Union[str, Any] ):
snake_case_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
snake_case_ = FlaxBertModel(lowercase_ )
snake_case_ = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowercase_ , lowercase_ ) )
with self.assertRaises(lowercase_ ):
snake_case_ = FlaxBertModel.from_pretrained(lowercase_ )
snake_case_ = FlaxBertModel.from_pretrained(lowercase_ , subfolder=lowercase_ )
self.assertTrue(check_models_equal(lowercase_ , lowercase_ ) )
def A_ ( self : Union[str, Any] ):
snake_case_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
snake_case_ = FlaxBertModel(lowercase_ )
snake_case_ = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowercase_ , lowercase_ ) , max_shard_size='''10KB''' )
with self.assertRaises(lowercase_ ):
snake_case_ = FlaxBertModel.from_pretrained(lowercase_ )
snake_case_ = FlaxBertModel.from_pretrained(lowercase_ , subfolder=lowercase_ )
self.assertTrue(check_models_equal(lowercase_ , lowercase_ ) )
def A_ ( self : str ):
snake_case_ = '''bert'''
snake_case_ = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(lowercase_ ):
snake_case_ = FlaxBertModel.from_pretrained(lowercase_ )
snake_case_ = FlaxBertModel.from_pretrained(lowercase_ , subfolder=lowercase_ )
self.assertIsNotNone(lowercase_ )
def A_ ( self : Tuple ):
snake_case_ = '''bert'''
snake_case_ = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(lowercase_ ):
snake_case_ = FlaxBertModel.from_pretrained(lowercase_ )
snake_case_ = FlaxBertModel.from_pretrained(lowercase_ , subfolder=lowercase_ )
self.assertIsNotNone(lowercase_ )
| 56 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
return x + 2
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Optional[Any] ) ->int:
snake_case_ = '''x = 3'''
snake_case_ = {}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
assert result == 3
self.assertDictEqual(_UpperCamelCase , {'''x''': 3} )
snake_case_ = '''x = y'''
snake_case_ = {'''y''': 5}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_UpperCamelCase , {'''x''': 5, '''y''': 5} )
def snake_case__( self : Dict ) ->Optional[int]:
snake_case_ = '''y = add_two(x)'''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase )
assert result == 5
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} )
# Won't work without the tool
with CaptureStdout() as out:
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
assert result is None
assert "tried to execute add_two" in out.out
def snake_case__( self : Union[str, Any] ) ->Dict:
snake_case_ = '''x = 3'''
snake_case_ = {}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
assert result == 3
self.assertDictEqual(_UpperCamelCase , {'''x''': 3} )
def snake_case__( self : Optional[int] ) ->Optional[int]:
snake_case_ = '''test_dict = {\'x\': x, \'y\': add_two(x)}'''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase )
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} )
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
def snake_case__( self : Dict ) ->str:
snake_case_ = '''x = 3\ny = 5'''
snake_case_ = {}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} )
def snake_case__( self : str ) ->Tuple:
snake_case_ = '''text = f\'This is x: {x}.\''''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''text''': '''This is x: 3.'''} )
def snake_case__( self : Optional[Any] ) ->List[str]:
snake_case_ = '''if x <= 3:\n y = 2\nelse:\n y = 5'''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 2} )
snake_case_ = {'''x''': 8}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_UpperCamelCase , {'''x''': 8, '''y''': 5} )
def snake_case__( self : str ) ->str:
snake_case_ = '''test_list = [x, add_two(x)]'''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , [3, 5] )
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_list''': [3, 5]} )
def snake_case__( self : Any ) ->List[Any]:
snake_case_ = '''y = x'''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
assert result == 3
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 3} )
def snake_case__( self : Optional[int] ) ->Dict:
snake_case_ = '''test_list = [x, add_two(x)]\ntest_list[1]'''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase )
assert result == 5
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_list''': [3, 5]} )
snake_case_ = '''test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase )
assert result == 5
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
def snake_case__( self : Optional[Any] ) ->int:
snake_case_ = '''x = 0\nfor i in range(3):\n x = i'''
snake_case_ = {}
snake_case_ = evaluate(_UpperCamelCase , {'''range''': range} , state=_UpperCamelCase )
assert result == 2
self.assertDictEqual(_UpperCamelCase , {'''x''': 2, '''i''': 2} ) | 8 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
A : List[str] = {
"sample_size": 3_2,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1_0_0_0,
"block_out_channels": [3_2, 6_4],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
A : Tuple = {
"sample_size": 6_4,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1_0_0_0,
"block_out_channels": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
"attention_head_dim": 6_4,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
A : Dict = {
"sample_size": 2_5_6,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
"attention_head_dim": 6_4,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
A : Optional[Any] = {
"num_train_timesteps": 4_0,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
A : List[str] = {
"num_train_timesteps": 2_0_1,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
A : Optional[Any] = {
"num_train_timesteps": 1_5_1,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected" )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ):
'''simple docstring'''
__lowerCAmelCase = checkpoint[f"{old_prefix}.in_layers.0.weight"]
__lowerCAmelCase = checkpoint[f"{old_prefix}.in_layers.0.bias"]
__lowerCAmelCase = checkpoint[f"{old_prefix}.in_layers.2.weight"]
__lowerCAmelCase = checkpoint[f"{old_prefix}.in_layers.2.bias"]
__lowerCAmelCase = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
__lowerCAmelCase = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
__lowerCAmelCase = checkpoint[f"{old_prefix}.out_layers.0.weight"]
__lowerCAmelCase = checkpoint[f"{old_prefix}.out_layers.0.bias"]
__lowerCAmelCase = checkpoint[f"{old_prefix}.out_layers.3.weight"]
__lowerCAmelCase = checkpoint[f"{old_prefix}.out_layers.3.bias"]
if has_skip:
__lowerCAmelCase = checkpoint[f"{old_prefix}.skip_connection.weight"]
__lowerCAmelCase = checkpoint[f"{old_prefix}.skip_connection.bias"]
return new_checkpoint
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3 , dim=0 )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3 , dim=0 )
__lowerCAmelCase = checkpoint[f"{old_prefix}.norm.weight"]
__lowerCAmelCase = checkpoint[f"{old_prefix}.norm.bias"]
__lowerCAmelCase = weight_q.squeeze(-1 ).squeeze(-1 )
__lowerCAmelCase = bias_q.squeeze(-1 ).squeeze(-1 )
__lowerCAmelCase = weight_k.squeeze(-1 ).squeeze(-1 )
__lowerCAmelCase = bias_k.squeeze(-1 ).squeeze(-1 )
__lowerCAmelCase = weight_v.squeeze(-1 ).squeeze(-1 )
__lowerCAmelCase = bias_v.squeeze(-1 ).squeeze(-1 )
__lowerCAmelCase = (
checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1 ).squeeze(-1 )
)
__lowerCAmelCase = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = torch.load(_UpperCamelCase , map_location="cpu" )
__lowerCAmelCase = {}
__lowerCAmelCase = checkpoint["time_embed.0.weight"]
__lowerCAmelCase = checkpoint["time_embed.0.bias"]
__lowerCAmelCase = checkpoint["time_embed.2.weight"]
__lowerCAmelCase = checkpoint["time_embed.2.bias"]
if unet_config["num_class_embeds"] is not None:
__lowerCAmelCase = checkpoint["label_emb.weight"]
__lowerCAmelCase = checkpoint["input_blocks.0.0.weight"]
__lowerCAmelCase = checkpoint["input_blocks.0.0.bias"]
__lowerCAmelCase = unet_config["down_block_types"]
__lowerCAmelCase = unet_config["layers_per_block"]
__lowerCAmelCase = unet_config["attention_head_dim"]
__lowerCAmelCase = unet_config["block_out_channels"]
__lowerCAmelCase = 1
__lowerCAmelCase = channels_list[0]
for i, layer_type in enumerate(_UpperCamelCase ):
__lowerCAmelCase = channels_list[i]
__lowerCAmelCase = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(_UpperCamelCase ):
__lowerCAmelCase = f"down_blocks.{i}.resnets.{j}"
__lowerCAmelCase = f"input_blocks.{current_layer}.0"
__lowerCAmelCase = True if j == 0 and downsample_block_has_skip else False
__lowerCAmelCase = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , has_skip=_UpperCamelCase )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(_UpperCamelCase ):
__lowerCAmelCase = f"down_blocks.{i}.resnets.{j}"
__lowerCAmelCase = f"input_blocks.{current_layer}.0"
__lowerCAmelCase = True if j == 0 and downsample_block_has_skip else False
__lowerCAmelCase = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , has_skip=_UpperCamelCase )
__lowerCAmelCase = f"down_blocks.{i}.attentions.{j}"
__lowerCAmelCase = f"input_blocks.{current_layer}.1"
__lowerCAmelCase = convert_attention(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
current_layer += 1
if i != len(_UpperCamelCase ) - 1:
__lowerCAmelCase = f"down_blocks.{i}.downsamplers.0"
__lowerCAmelCase = f"input_blocks.{current_layer}.0"
__lowerCAmelCase = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
current_layer += 1
__lowerCAmelCase = current_channels
# hardcoded the mid-block for now
__lowerCAmelCase = "mid_block.resnets.0"
__lowerCAmelCase = "middle_block.0"
__lowerCAmelCase = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase = "mid_block.attentions.0"
__lowerCAmelCase = "middle_block.1"
__lowerCAmelCase = convert_attention(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase = "mid_block.resnets.1"
__lowerCAmelCase = "middle_block.2"
__lowerCAmelCase = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase = 0
__lowerCAmelCase = unet_config["up_block_types"]
for i, layer_type in enumerate(_UpperCamelCase ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
__lowerCAmelCase = f"up_blocks.{i}.resnets.{j}"
__lowerCAmelCase = f"output_blocks.{current_layer}.0"
__lowerCAmelCase = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , has_skip=_UpperCamelCase )
current_layer += 1
if i != len(_UpperCamelCase ) - 1:
__lowerCAmelCase = f"up_blocks.{i}.upsamplers.0"
__lowerCAmelCase = f"output_blocks.{current_layer-1}.1"
__lowerCAmelCase = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
__lowerCAmelCase = f"up_blocks.{i}.resnets.{j}"
__lowerCAmelCase = f"output_blocks.{current_layer}.0"
__lowerCAmelCase = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , has_skip=_UpperCamelCase )
__lowerCAmelCase = f"up_blocks.{i}.attentions.{j}"
__lowerCAmelCase = f"output_blocks.{current_layer}.1"
__lowerCAmelCase = convert_attention(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
current_layer += 1
if i != len(_UpperCamelCase ) - 1:
__lowerCAmelCase = f"up_blocks.{i}.upsamplers.0"
__lowerCAmelCase = f"output_blocks.{current_layer-1}.2"
__lowerCAmelCase = convert_resnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase = checkpoint["out.0.weight"]
__lowerCAmelCase = checkpoint["out.0.bias"]
__lowerCAmelCase = checkpoint["out.2.weight"]
__lowerCAmelCase = checkpoint["out.2.bias"]
return new_checkpoint
if __name__ == "__main__":
A : Dict = argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
A : Optional[Any] = parser.parse_args()
A : int = strabool(args.class_cond)
A : Dict = os.path.basename(args.unet_path)
print(f'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
A : Union[str, Any] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
A : Any = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
A : Union[str, Any] = TEST_UNET_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
A : Union[str, Any] = None
A : int = con_pt_to_diffuser(args.unet_path, unet_config)
A : Tuple = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
A : Union[str, Any] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
A : Any = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
A : Any = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
A : Optional[int] = CMStochasticIterativeScheduler(**scheduler_config)
A : Optional[Any] = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 57 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Any , _UpperCamelCase : Any , _UpperCamelCase : Tuple ) ->List[Any]:
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(_UpperCamelCase ) for s in shape] )}.npy'''
def snake_case__( self : Any ) ->List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def snake_case__( self : int , _UpperCamelCase : Union[str, Any]=0 , _UpperCamelCase : int=(4, 4, 6_4, 6_4) , _UpperCamelCase : Optional[int]=False ) ->Tuple:
snake_case_ = jnp.bfloataa if fpaa else jnp.floataa
snake_case_ = jnp.array(load_hf_numpy(self.get_file_format(_UpperCamelCase , _UpperCamelCase ) ) , dtype=_UpperCamelCase )
return image
def snake_case__( self : List[Any] , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : Optional[int]="CompVis/stable-diffusion-v1-4" ) ->Optional[Any]:
snake_case_ = jnp.bfloataa if fpaa else jnp.floataa
snake_case_ = '''bf16''' if fpaa else None
snake_case_, snake_case_ = FlaxUNetaDConditionModel.from_pretrained(
_UpperCamelCase , subfolder='''unet''' , dtype=_UpperCamelCase , revision=_UpperCamelCase )
return model, params
def snake_case__( self : Dict , _UpperCamelCase : List[Any]=0 , _UpperCamelCase : Tuple=(4, 7_7, 7_6_8) , _UpperCamelCase : List[Any]=False ) ->int:
snake_case_ = jnp.bfloataa if fpaa else jnp.floataa
snake_case_ = jnp.array(load_hf_numpy(self.get_file_format(_UpperCamelCase , _UpperCamelCase ) ) , dtype=_UpperCamelCase )
return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[1_7, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_0_0_0, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def snake_case__( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] ) ->Union[str, Any]:
snake_case_, snake_case_ = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=_UpperCamelCase )
snake_case_ = self.get_latents(_UpperCamelCase , fpaa=_UpperCamelCase )
snake_case_ = self.get_encoder_hidden_states(_UpperCamelCase , fpaa=_UpperCamelCase )
snake_case_ = model.apply(
{'''params''': params} , _UpperCamelCase , jnp.array(_UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=_UpperCamelCase , ).sample
assert sample.shape == latents.shape
snake_case_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
snake_case_ = jnp.array(_UpperCamelCase , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[1_7, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_0_0_0, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def snake_case__( self : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str ) ->Dict:
snake_case_, snake_case_ = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=_UpperCamelCase )
snake_case_ = self.get_latents(_UpperCamelCase , shape=(4, 4, 9_6, 9_6) , fpaa=_UpperCamelCase )
snake_case_ = self.get_encoder_hidden_states(_UpperCamelCase , shape=(4, 7_7, 1_0_2_4) , fpaa=_UpperCamelCase )
snake_case_ = model.apply(
{'''params''': params} , _UpperCamelCase , jnp.array(_UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=_UpperCamelCase , ).sample
assert sample.shape == latents.shape
snake_case_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
snake_case_ = jnp.array(_UpperCamelCase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-2 ) | 8 | 0 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
| 58 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Release ``objects`` by setting them to ``None`` and emptying the device cache."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Check whether ``exception`` is one of the out-of-memory errors worth retrying on."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """Retry ``function`` with a halved batch size every time an out-of-memory error is raised."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator | 8 | 0 |
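A hedged usage sketch for the decorator above: the wrapped function must take the batch size as its first parameter, and callers invoke it without passing one, since the decorator injects (and, on OOM, halves) it.

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    ...  # build dataloaders with `batch_size`; an OOM here triggers a retry at 64, 32, ...

train()  # batch_size is supplied by the decorator, not the caller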
from __future__ import annotations
def UpperCamelCase ( __lowerCamelCase : list[int] ):
return len(set(__lowerCamelCase ) ) == len(__lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 59 |
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its 1-based alphabet position."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Inverse of :func:`encode`."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main() | 8 | 0 |
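A quick round-trip check for the fixed helpers above ('a' maps to 1, 'b' to 2, and so on):

assert encode("marvin") == [13, 1, 18, 22, 9, 14]
assert decode([13, 1, 18, 22, 9, 14]) == "marvin"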
"""simple docstring"""
def average_absolute_deviation(nums: list[int]) -> float:
    if not nums:  # Makes sure that the list is not empty
        raise ValueError('''List is empty''')

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 60 |
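A worked example for the helper above: for [1, 2, 3] the mean is 2 and the absolute deviations are 1, 0, 1, so the result is 2/3:

assert average_absolute_deviation([1, 2, 3]) == 2 / 3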
import math
def res(x, y):
    if 0 not in (x, y):
        # We compare x^y values via log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal") | 8 | 0 |
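A worked comparison using the fixed helper: for 2^10 versus 10^3, res(2, 10) = 10 * log10(2), roughly 3.0103, exceeds res(10, 3) = 3.0, so 2^10 (= 1024) is the larger power:

assert res(2, 10) > res(10, 3)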