"""Table Transformer package initializer with lazy imports."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
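# Hedged usage sketch (added, not part of the original file): with the lazy
# module above, importing the package itself is cheap; the submodules listed in
# `_import_structure` are only imported on first attribute access, e.g.
#
#   from transformers.models.table_transformer import TableTransformerConfig
#   config = TableTransformerConfig()  # the real submodule is loaded here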
"""Pure-Python implementation of the MD5 hash algorithm."""
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Converts the given string of 32 characters to little-endian (byte-wise)."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Converts the given non-negative integer to hexadecimal, little-endian."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Converts the message to a bit string and pads it to a multiple of 512 bits."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Splits the bit string into 512-bit blocks and yields each block as a list
    of sixteen 32-bit little-endian words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Performs bitwise NOT on a 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Adds two integers modulo 2^32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotates the bits of a 32-bit integer `i` left by `shift` positions."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
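# Hedged worked example (added, not in the original file): left_rotate_32 wraps
# the shifted-out high bits back into the low bits, e.g.
# left_rotate_32(0x80000000, 1) == 0x00000001 and left_rotate_32(1, 4) == 16.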
def md5_me(message: bytes) -> bytes:
    """Returns the 32-char little-endian hex MD5 digest of the given message."""
    # Convert to bit string, add padding and append message length
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
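    # Added sanity check (hedged, assumes the deobfuscated names above): the
    # pure-Python digest should agree with the standard library's MD5.
    import hashlib

    for sample in [b"", b"hello", b"The quick brown fox jumps over the lazy dog"]:
        assert md5_me(sample) == hashlib.md5(sample).hexdigest().encode("utf-8")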
"""CUAD metric for the `datasets` library."""
import datasets

from .evaluate import evaluate


_CITATION = """\
@article{hendrycks2021cuad,
    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
    journal={arXiv preprint arXiv:2103.06268},
    year={2021}
}
"""

_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""

_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': list of possible texts for the answer, as a list of strings
          depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the CUAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
        Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly match the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
    'aupr': Area Under the Precision-Recall curve
    'prec_at_80_recall': Precision at 80% recall
    'prec_at_90_recall': Precision at 90% recall
Examples:
    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> cuad_metric = datasets.load_metric("cuad")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
"""Heun discrete scheduler for diffusers (2nd-order method from Karras et al. 2022)."""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """Creates a beta schedule that discretizes the given alpha_t_bar function,
    which defines the cumulative product of (1 - beta) over time."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
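# Hedged usage sketch (added, not part of the original module):
#
#   betas = betas_for_alpha_bar(1000)  # cosine transform by default
#   assert betas.shape == (1000,)
#   assert bool((betas > 0).all()) and bool((betas <= 0.999).all())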
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Scheduler implementing Heun's (2nd order) method for discrete beta schedules."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ):
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps):
        """Constructs the noise schedule of Karras et al. (2022)."""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
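# Hedged usage sketch (added, not part of the original file): a typical
# denoising loop with this scheduler; `unet` is a hypothetical stand-in for any
# noise-prediction model.
#
#   scheduler = HeunDiscreteScheduler()
#   scheduler.set_timesteps(num_inference_steps=25)
#   sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t)  # hypothetical model call
#       sample = scheduler.step(noise_pred, t, sample).prev_sample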
"""Tokenization classes for GPT-SW3, backed by SentencePiece."""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging


if is_torch_available():
    import torch

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}


class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to disable the default clean-up."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string. Special tokens remain intact."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False

        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Encodes a text or batch of texts to token ids using the default SentencePiece behaviour."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decodes token ids to text using the default SentencePiece behaviour."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
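# Hedged usage sketch (added, not part of the original file):
#
#   tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   ids = tokenizer.encode_fast("Exempeltext")  # plain list of token ids
#   text = tokenizer.decode_fast(ids)           # round-trips via sentencepiece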
"""Single-qubit measurement on the Qiskit Aer simulator."""
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Builds a fresh circuit, measures qubit 0, and returns the counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
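# Hedged note (added): the circuit applies no gates before measurement, so the
# qubit remains in |0> and the expected counts are {'0': 1000}.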
"""Project Euler problem 7: find the 10001st prime number."""
from math import sqrt


def is_prime(number: int) -> bool:
    """Returns True if `number` is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes numbers are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Returns the `nth` prime number."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")
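    # Added sanity checks (hedged): small cases from the Project Euler 7 statement.
    assert solution(1) == 2
    assert solution(6) == 13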
"""Integration test for the Flax Stable Diffusion inpainting pipeline."""
import gc
import unittest

from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up memory after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_init_image, processed_mask_image = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_init_image = shard(processed_init_image)
        processed_mask_image = shard(processed_mask_image)

        output = pipeline(
            prompt_ids,
            processed_init_image,
            processed_mask_image,
            params,
            prng_seed,
            num_inference_steps,
            jit=True,
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
"""XLM package initializer with lazy imports."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Regular-expression matching with '.' and '*' wildcards, via dynamic programming."""


def match_pattern(input_string: str, pattern: str) -> bool:
    """Returns True if `pattern` (with '.' and '*' wildcards) matches `input_string`."""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")

    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
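# Hedged worked example (added): for input_string = "aab" and pattern = "c*a*b",
# "c*" matches the empty string, "a*" matches "aa", and "b" matches "b", so
# dp[3][5] == 1 and match_pattern returns True.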
"""Image processor class for CLIP."""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
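# Hedged usage sketch (added, not part of the original file); `pil_image` is a
# stand-in for any PIL.Image.Image:
#
#   processor = CLIPImageProcessor()
#   batch = processor(images=pil_image, return_tensors="pt")
#   # batch["pixel_values"] has shape (1, 3, 224, 224) with the defaults above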
"""Tests for the BlenderbotSmall tokenizer."""
import json
import os
import unittest

from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
    VOCAB_FILES_NAMES,
    BlenderbotSmallTokenizer,
)

from ...test_tokenization_common import TokenizerTesterMixin


class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
"""Tests that the deprecated `datasets` metrics API emits a FutureWarning pointing to `evaluate`."""
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
"""Morse code encryption and decryption."""
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
    "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
    "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
    ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.",
    "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
    "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/",
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)

    message = encrypt(message)
    print(message)

    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
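# Hedged example (added): encrypt("SOS") returns "... --- ...", and
# decrypt("... --- ...") round-trips to "SOS", since REVERSE_DICT is the exact
# inverse of MORSE_CODE_DICT.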
"""All-pairs shortest paths via the Floyd-Warshall algorithm."""
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    # print the results so the demo actually shows the shortest distances
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
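    # Hedged check (added): with the edges above, the shortest 1 -> 4 path is
    # 1 -> 3 -> 4 (5 + 6 = 11) and the shortest 0 -> 3 path is 0 -> 2 -> 3 (9 + 7 = 16).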
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase ( cls : Optional[Any] ):
'''simple docstring'''
_a : List[Any] = TOKEN
HfFolder.save_token(_a )
@classmethod
def __lowercase ( cls : List[Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-config' )
except HTTPError:
pass
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Any = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('test-config' ,use_auth_token=self._token )
_a : Optional[Any] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a ,repo_id='test-config' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Dict = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Tuple = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' ,use_auth_token=self._token )
_a : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a ,repo_id='valid_org/test-config-org' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
_a : Optional[Any] = CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map ,{'AutoConfig': 'custom_configuration.CustomConfig'} )
_a : int = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" ,trust_remote_code=_a )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ ,'CustomConfig' )
self.assertEqual(new_config.attribute ,42 )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Optional[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_a : int = c.n_embd + 1 # int
_a : str = c.resid_pdrop + 1.0 # float
_a : Dict = not c.scale_attn_weights # bool
_a : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(_a ,c.n_embd ,'mismatch for key: n_embd' )
self.assertEqual(_a ,c.resid_pdrop ,'mismatch for key: resid_pdrop' )
self.assertEqual(_a ,c.scale_attn_weights ,'mismatch for key: scale_attn_weights' )
self.assertEqual(_a ,c.summary_type ,'mismatch for key: summary_type' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : int = PretrainedConfig()
_a : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
_a ,['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
_a : Dict = [key for key, value in config_common_kwargs.items() if value == getattr(_a ,_a )]
if len(_a ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F""" {', '.join(_a )}.""" )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaises(_a ):
# config is in subfolder, the following should not work without specifying the subfolder
_a : List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_a : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ,subfolder='bert' )
self.assertIsNotNone(_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : List[Any] = mock.Mock()
_a : Any = 500
_a : Any = {}
_a : Any = HTTPError
_a : List[Any] = {}
# Download this model to make sure it's in the cache.
_a : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' ,return_value=_a ) as mock_head:
_a : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = AutoConfig.from_pretrained('bert-base-cased' )
_a : List[str] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_a )
_a : str = 2
json.dump(configuration.to_dict() ,open(os.path.join(_a ,'config.4.0.0.json' ) ,'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_a : int = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size ,2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_a : Tuple = ['config.42.0.0.json']
_a : int = 768
configuration.save_pretrained(_a )
shutil.move(os.path.join(_a ,'config.4.0.0.json' ) ,os.path.join(_a ,'config.42.0.0.json' ) )
_a : int = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size ,768 )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Tuple = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
_a : Optional[int] = 'v4.0.0'
_a, _a : Tuple = new_transformers.models.auto.AutoConfig.from_pretrained(
_a ,return_unused_kwargs=_a )
self.assertEqual(new_configuration.hidden_size ,2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_a ,{} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
_a : str = 'v3.0.0'
_a : Optional[Any] = old_transformers.models.auto.AutoConfig.from_pretrained(_a )
self.assertEqual(old_configuration.hidden_size ,768 )
| 5 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
'''simple docstring'''
super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
def __lowercase ( self : Any ):
'''simple docstring'''
pass
| 350 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"""169M""": 1_2,
"""430M""": 2_4,
"""1B5""": 2_4,
"""3B""": 3_2,
"""7B""": 3_2,
"""14B""": 4_0,
}
HIDDEN_SIZE_MAPPING = {
"""169M""": 7_6_8,
"""430M""": 1_0_2_4,
"""1B5""": 2_0_4_8,
"""3B""": 2_5_6_0,
"""7B""": 4_0_9_6,
"""14B""": 5_1_2_0,
}
def convert_state_dict(state_dict):
    """simple docstring"""
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith('emb.'):
            name = name.replace('emb.', 'embeddings.')
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('blocks.0.ln0'):
            name = name.replace('blocks.0.ln0', 'blocks.0.pre_ln')
        # att -> attention
        name = re.sub(R'blocks\.(\d+)\.att', R'blocks.\1.attention', name)
        # ffn -> feed_forward
        name = re.sub(R'blocks\.(\d+)\.ffn', R'blocks.\1.feed_forward', name)
        # time_mix_k -> time_mix_key
        if name.endswith('.time_mix_k'):
            name = name.replace('.time_mix_k', '.time_mix_key')
        # time_mix_v -> time_mix_value
        if name.endswith('.time_mix_v'):
            name = name.replace('.time_mix_v', '.time_mix_value')
        # time_mix_r -> time_mix_receptance
        if name.endswith('.time_mix_r'):
            name = name.replace('.time_mix_r', '.time_mix_receptance')
        if name != "head.weight":
            name = 'rwkv.' + name
        state_dict[name] = weight
    return state_dict
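# A quick sketch of what convert_state_dict does to RWKV checkpoint keys
# (toy zero tensors; the key names follow the renaming rules above):
#     toy = {
#         'emb.weight': torch.zeros(1),
#         'blocks.0.ln0.weight': torch.zeros(1),
#         'blocks.0.att.time_mix_k': torch.zeros(1),
#         'head.weight': torch.zeros(1),
#     }
#     sorted(convert_state_dict(toy))
#     # -> ['head.weight', 'rwkv.blocks.0.attention.time_mix_key',
#     #     'rwkv.blocks.0.pre_ln.weight', 'rwkv.embeddings.weight']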
def convert_rwkv_checkpoint_to_hf_format(repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None):
"""simple docstring"""
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
_a : List[Any] = 5_0_2_7_7
_a : Optional[Any] = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
_a : Optional[Any] = PreTrainedTokenizerFast(tokenizer_file=__a )
_a : List[Any] = len(__a )
tokenizer.save_pretrained(__a )
# 2. Build the config
_a : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_a : str = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
_a : str = RwkvConfig(
vocab_size=vocab_size , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__a )
# 3. Download model file then convert state_dict
_a : Tuple = hf_hub_download(__a , __a )
_a : Optional[int] = torch.load(__a , map_location='cpu' )
_a : Dict = convert_state_dict(__a )
# 4. Split in shards and save
_a, _a : List[Any] = shard_checkpoint(__a )
for shard_file, shard in shards.items():
torch.save(__a , os.path.join(__a , __a ) )
if index is not None:
_a : Dict = os.path.join(__a , __a )
# Save the index as well
with open(__a , 'w' , encoding='utf-8' ) as f:
_a : List[Any] = json.dumps(__a , indent=2 , sort_keys=__a ) + '\n'
f.write(__a )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
'Cleaning up shards. This may error with an OOM error, if this is the case don\'t worry, you still have converted the model.' )
_a : List[Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_a : Optional[Any] = torch.load(os.path.join(__a , __a ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__a , __a ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
_a : List[str] = AutoModelForCausalLM.from_pretrained(__a )
model.push_to_hub(__a , max_shard_size='2GB' )
tokenizer.push_to_hub(__a )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
args = parser.parse_args()
convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 5 | 0 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
"""simple docstring"""
_a : Optional[int] = 1.5
_a : Tuple = int(factor * num_class_images )
_a : List[Any] = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=lowerCAmelCase__ , aesthetic_weight=0.1 )
os.makedirs(f"""{class_data_dir}/images""" , exist_ok=lowerCAmelCase__ )
if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
_a : List[Any] = client.query(text=lowerCAmelCase__ )
if len(lowerCAmelCase__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
_a : Optional[Any] = int(factor * num_images )
_a : int = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=lowerCAmelCase__ , aesthetic_weight=0.1 , )
_a : Optional[Any] = 0
_a : Optional[int] = 0
_a : Dict = tqdm(desc='downloading real regularization images' , total=lowerCAmelCase__ )
with open(f"""{class_data_dir}/caption.txt""" , 'w' ) as fa, open(f"""{class_data_dir}/urls.txt""" , 'w' ) as fa, open(
f"""{class_data_dir}/images.txt""" , 'w' ) as fa:
while total < num_class_images:
_a : str = class_images[count]
count += 1
try:
_a : Any = requests.get(images['url'] )
if img.status_code == 2_0_0:
_a : List[Any] = Image.open(BytesIO(img.content ) )
with open(f"""{class_data_dir}/images/{total}.jpg""" , 'wb' ) as f:
f.write(img.content )
fa.write(images['caption'] + '\n' )
fa.write(images['url'] + '\n' )
fa.write(f"""{class_data_dir}/images/{total}.jpg""" + '\n' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def parse_args():
"""simple docstring"""
    parser = argparse.ArgumentParser('', add_help=False)
    parser.add_argument('--class_prompt', help='text prompt to retrieve images', required=True, type=str)
    parser.add_argument('--class_data_dir', help='path to save images', required=True, type=str)
    parser.add_argument('--num_class_images', help='number of images to download', default=200, type=int)
return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 351 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
__lowerCAmelCase = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
_DESCRIPTION = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
_KWARGS_DESCRIPTION = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
"""simple docstring"""
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://unbabel.github.io/COMET/html/index.html' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'sources': datasets.Value('string' ,id='sequence' ),
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/Unbabel/COMET'] ,reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6',
] ,)
    def _download_and_prepare(self, dl_manager):
        '''simple docstring'''
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da'))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        '''simple docstring'''
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'src': sources, 'mt': predictions, 'ref': references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        mean_score, scores = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 5 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__lowerCAmelCase = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
'''simple docstring'''
_a : List[Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir ,'models/bert/' ) )
_a : Union[str, Any] = self.transformer_dir
shutil.copy(
os.path.join(_a ,'src/transformers/models/bert/modeling_bert.py' ) ,os.path.join(self.transformer_dir ,'models/bert/modeling_bert.py' ) ,)
    def tearDown(self):
'''simple docstring'''
_a : List[str] = 'src/transformers'
shutil.rmtree(self.transformer_dir )
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
'''simple docstring'''
_a : Dict = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_a : Union[str, Any] = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
_a : List[str] = black.format_str(_a ,mode=_a )
_a : Union[str, Any] = os.path.join(self.transformer_dir ,'new_code.py' )
with open(_a ,'w' ,newline='\n' ) as f:
f.write(_a )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_a ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name ,overwrite=_a )
with open(_a ,'r' ) as f:
self.assertTrue(f.read() ,_a )
    def test_find_code_in_transformers(self):
'''simple docstring'''
_a : Dict = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead' )
self.assertEqual(_a ,_a )
    def test_is_copy_consistent(self):
'''simple docstring'''
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' ,'BertLMPredictionHead' ,REFERENCE_CODE + '\n' ,)
# With no empty line at the end
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' ,'BertLMPredictionHead' ,_a ,)
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' ,'TestModelLMPredictionHead' ,re.sub('Bert' ,'TestModel' ,_a ) ,)
# Copy consistency with a really long name
_a : Optional[Any] = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
F"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" ,F"""{long_class_name}LMPredictionHead""" ,re.sub('Bert' ,_a ,_a ) ,)
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' ,'TestModelLMPredictionHead' ,_a ,overwrite_result=re.sub('Bert' ,'TestModel' ,_a ) ,)
    def test_convert_to_localized_md(self):
'''simple docstring'''
_a : Tuple = check_copies.LOCALIZED_READMES['README_zh-hans.md']
_a : str = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
_a : Optional[Any] = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
_a : str = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
_a, _a : Union[str, Any] = check_copies.convert_to_localized_md(
_a ,_a ,localized_readme['format_model_list'] )
self.assertFalse(_a )
self.assertEqual(_a ,_a )
_a, _a : Optional[Any] = check_copies.convert_to_localized_md(
_a ,_a ,localized_readme['format_model_list'] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(_a )
_a : Optional[int] = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
_a : List[Any] = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
_a : Union[str, Any] = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
_a, _a : str = check_copies.convert_to_localized_md(
_a ,_a ,localized_readme['format_model_list'] )
# Check if the model link is synchronized.
self.assertEqual(_a ,_a )
| 352 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
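# Note: with _LazyModule, `from transformers.models.squeezebert import SqueezeBertModel`
# resolves lazily -- the torch-backed submodule is only imported on first attribute
# access, which keeps the top-level `import transformers` cheap.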
| 5 | 0 |
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
"""simple docstring"""
    def _create_dummy_data(self, data_dir):
        '''simple docstring'''
        os.makedirs(data_dir, exist_ok=True)
        contents = {'source': 'What is love ?', 'target': 'life'}
        n_lines = {'train': 12, 'val': 2, 'test': 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = '\n'.join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, F"""{split}.{field}"""), 'w') as f:
                    f.write(content)
    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, 'output')
        data_dir = os.path.join(tmp_dir, 'data')
        self._create_dummy_data(data_dir=data_dir)
        testargs = F"""--data_dir {data_dir} \n --output_dir {output_dir} \n --model_name_or_path facebook/rag-sequence-base \n --model_type rag_sequence \n --do_train \n --do_predict \n --n_val -1 \n --val_check_interval 1.0 \n --train_batch_size 2 \n --eval_batch_size 1 \n --max_source_length 25 \n --max_target_length 25 \n --val_max_target_length 25 \n --test_max_target_length 25 \n --label_smoothing 0.1 \n --dropout 0.1 \n --attention_dropout 0.1 \n --weight_decay 0.001 \n --adam_epsilon 1e-08 \n --max_grad_norm 0.1 \n --lr_scheduler polynomial \n --learning_rate 3e-04 \n --num_train_epochs 1 \n --warmup_steps 4 \n --gradient_accumulation_steps 1 \n --distributed-port 8787 \n --use_dummy_dataset 1 \n --distributed_retriever {distributed_retriever} \n """.split()
if gpus > 0:
testargs.append(F"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())
        metrics_save_path = os.path.join(output_dir, 'metrics.json')
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
@require_torch_gpu
    def test_finetune_gpu(self):
        '''simple docstring'''
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)
@require_torch_multi_gpu
    def test_finetune_multigpu(self):
        '''simple docstring'''
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)
@require_torch_gpu
@require_ray
    def test_finetune_gpu_ray_retrieval(self):
        '''simple docstring'''
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)
@require_torch_multi_gpu
@require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        '''simple docstring'''
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)
| 353 |
'''simple docstring'''
import sys
def matrix_chain_order(array):
    """simple docstring"""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    """simple docstring"""
    if i == j:
        print('A' + str(i), end=' ')
    else:
        print('(', end=' ')
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(')', end=' ')


def main():
    """simple docstring"""
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print('No. of Operation required: ' + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
| 5 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    """simple docstring"""
    b = b.T
    aa = np.sum(np.square(a), axis=1)
    ba = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
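# Quick sanity check of the helper above (pairwise squared L2 distances):
#     a = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])  # (2, 3)
#     b = np.array([[1.0, 0.0, 0.0]])                   # (1, 3)
#     squared_euclidean_distance(a, b)  # -> array([[1.], [2.]])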
def color_quantize(x, clusters):
    """simple docstring"""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
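# Example: map each pixel of a 1x2 RGB image to its nearest color cluster.
#     pixels = np.array([[[255, 0, 0], [0, 0, 255]]])
#     clusters = np.array([[250, 5, 5], [5, 5, 250]])
#     color_quantize(pixels, clusters)  # -> array([0, 1])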
class ImageGPTImageProcessor(BaseImageProcessor):
    """simple docstring"""
    model_input_names = ['pixel_values']
def __init__( self : int ,_a : Optional[Union[List[List[int]], np.ndarray]] = None ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BILINEAR ,_a : bool = True ,_a : bool = True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
_a : Tuple = size if size is not None else {'height': 256, 'width': 256}
_a : Optional[Any] = get_size_dict(UpperCamelCase_ )
_a : List[Any] = np.array(UpperCamelCase_ ) if clusters is not None else None
_a : int = do_resize
_a : Any = size
_a : str = resample
_a : Optional[int] = do_normalize
_a : Optional[int] = do_color_quantize
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
'''simple docstring'''
_a : Dict = get_size_dict(UpperCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size dictionary must contain both height and width keys. Got {size.keys()}""" )
return resize(
UpperCamelCase_ ,size=(size['height'], size['width']) ,resample=UpperCamelCase_ ,data_format=UpperCamelCase_ ,**UpperCamelCase_ )
    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None):
'''simple docstring'''
_a : Union[str, Any] = rescale(image=UpperCamelCase_ ,scale=1 / 127.5 ,data_format=UpperCamelCase_ )
_a : Optional[Any] = image - 1
return image
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_normalize: bool = None, do_color_quantize: Optional[bool] = None, clusters: Optional[Union[List[List[int]], np.ndarray]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST, **kwargs):
'''simple docstring'''
_a : Optional[int] = do_resize if do_resize is not None else self.do_resize
_a : Optional[int] = size if size is not None else self.size
_a : Union[str, Any] = get_size_dict(UpperCamelCase_ )
_a : str = resample if resample is not None else self.resample
_a : Any = do_normalize if do_normalize is not None else self.do_normalize
_a : Tuple = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
_a : int = clusters if clusters is not None else self.clusters
_a : Optional[int] = np.array(UpperCamelCase_ )
_a : List[str] = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
_a : Dict = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_resize:
_a : List[str] = [self.resize(image=UpperCamelCase_ ,size=UpperCamelCase_ ,resample=UpperCamelCase_ ) for image in images]
if do_normalize:
_a : Optional[Any] = [self.normalize(image=UpperCamelCase_ ) for image in images]
if do_color_quantize:
_a : Optional[int] = [to_channel_dimension_format(UpperCamelCase_ ,ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
_a : Optional[Any] = np.array(UpperCamelCase_ )
_a : int = color_quantize(UpperCamelCase_ ,UpperCamelCase_ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
_a : Dict = images.shape[0]
_a : List[str] = images.reshape(UpperCamelCase_ ,-1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
_a : List[str] = list(UpperCamelCase_ )
else:
_a : Tuple = [to_channel_dimension_format(UpperCamelCase_ ,UpperCamelCase_ ) for image in images]
_a : List[Any] = {'input_ids': images}
return BatchFeature(data=UpperCamelCase_ ,tensor_type=UpperCamelCase_ )
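# Hedged usage sketch (toy random clusters for illustration only; real ImageGPT
# clusters live in [-1, 1] and ship with the pretrained checkpoints):
#     processor = ImageGPTImageProcessor(clusters=np.random.randint(0, 256, (8, 3)))
#     batch = processor.preprocess(np.zeros((32, 32, 3), dtype=np.uint8))
#     # batch['input_ids'] holds one array of 256 * 256 cluster indices per image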
| 354 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_lock_timeout(tmpdir):
    """simple docstring"""
    lock1 = FileLock(str(tmpdir / 'foo.lock'))
    lock2 = FileLock(str(tmpdir / 'foo.lock'))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_lock_filename(tmpdir):
    """simple docstring"""
    filename = 'a' * 1000 + '.lock'
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith('.lock')
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
| 5 | 0 |
'''simple docstring'''
def heaps(arr: list) -> list:
    """simple docstring"""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
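# Heap's algorithm produces each successive permutation with a single swap, e.g.:
#     heaps([1, 2, 3])
#     # -> [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]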
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
    print(heaps(arr))
| 355 |
'''simple docstring'''
def solution(min_total: int = 10**12) -> int:
    """simple docstring"""
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
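# Hedged note: this matches the Pell-equation approach to Project Euler 100
# ("arranged probability"). For example, solution(2) -> 3: a box of 4 discs
# with 3 blue is the smallest arrangement over 2 total discs where
# P(two blue) = (3/4) * (2/3) = 1/2.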
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 5 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        '''simple docstring'''
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'external_deps', 'test_metrics.py'])
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
@require_cpu
    def test_metric_cpu_noop(self):
        '''simple docstring'''
        debug_launcher(self.test_metrics.main, num_processes=1)
@require_cpu
    def test_metric_cpu_multi(self):
        '''simple docstring'''
        debug_launcher(self.test_metrics.main)
@require_single_gpu
    def test_metric_gpu(self):
        '''simple docstring'''
        self.test_metrics.main()
@require_multi_gpu
    def test_metric_gpu_multi(self):
        '''simple docstring'''
        print(F"""Found {torch.cuda.device_count()} devices.""")
        cmd = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 356 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""},
"""tokenizer_file""": {
"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"""
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""mobilebert-uncased""": 5_1_2}
PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer
def __init__( self : Dict ,_a : List[Any]=None ,_a : Optional[Any]=None ,_a : Union[str, Any]=True ,_a : Dict="[UNK]" ,_a : Union[str, Any]="[SEP]" ,_a : Any="[PAD]" ,_a : Optional[int]="[CLS]" ,_a : Optional[Any]="[MASK]" ,_a : Dict=True ,_a : Any=None ,**_a : Optional[Any] ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
_a : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,_a ) != do_lower_case
or normalizer_state.get('strip_accents' ,_a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,_a ) != tokenize_chinese_chars
):
_a : Optional[Any] = getattr(_a ,normalizer_state.pop('type' ) )
_a : Dict = do_lower_case
_a : str = strip_accents
_a : Tuple = tokenize_chinese_chars
_a : Optional[Any] = normalizer_class(**_a )
_a : str = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 5 | 0 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    """simple docstring"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
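# Example invocation through fire (the file names here are hypothetical):
#     python rouge_cli.py predictions.txt references.txt --save_path rouge.json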
| 357 |
'''simple docstring'''
def palindromic_string(input_string: str) -> str:
    """simple docstring"""
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ''
    output_string = ''
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
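# Examples (the '|' separators let one center-expansion pass handle both
# even- and odd-length palindromes):
#     palindromic_string('aba')      -> 'aba'
#     palindromic_string('abbbaba')  -> 'abbba'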
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 5 | 0 |
'''simple docstring'''
import os
def solution(filename: str = "input.txt") -> int:
    """simple docstring"""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(',')]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        # first take a step right from the previous column ...
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # ... then relax with a step down from the row above ...
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])
        # ... and finally relax with a step up from the row below
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 358 |
'''simple docstring'''
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """simple docstring"""
    if num < 0:
        raise ValueError('Number should not be negative.')
    return 1 if num in (0, 1) else num * factorial(num - 1)
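# e.g. factorial(5) -> 120; thanks to @lru_cache every intermediate result is
# memoized, so each distinct argument is computed at most once across calls.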
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 5 | 0 |
'''simple docstring'''
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
__lowerCAmelCase = {
"""kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""",
}
class AlignTextConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'align_text_model'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, position_embedding_type='absolute', use_cache=True, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
_a : int = vocab_size
_a : List[Any] = hidden_size
_a : Union[str, Any] = num_hidden_layers
_a : str = num_attention_heads
_a : Optional[Any] = hidden_act
_a : Any = intermediate_size
_a : Optional[int] = hidden_dropout_prob
_a : Tuple = attention_probs_dropout_prob
_a : List[str] = max_position_embeddings
_a : Tuple = type_vocab_size
_a : List[str] = initializer_range
_a : List[Any] = layer_norm_eps
_a : Optional[Any] = position_embedding_type
_a : int = use_cache
_a : List[Any] = pad_token_id
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
# get the text config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
_a : Optional[int] = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'align_vision_model'

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, drop_connect_rate: float = 0.2, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
_a : List[Any] = num_channels
_a : Tuple = image_size
_a : List[str] = width_coefficient
_a : Any = depth_coefficient
_a : Any = depth_divisor
_a : Union[str, Any] = kernel_sizes
_a : List[Any] = in_channels
_a : Any = out_channels
_a : Tuple = depthwise_padding
_a : Union[str, Any] = strides
_a : Optional[Any] = num_block_repeats
_a : Optional[Any] = expand_ratios
_a : List[str] = squeeze_expansion_ratio
_a : Union[str, Any] = hidden_act
_a : Optional[int] = hidden_dim
_a : Any = pooling_type
_a : Optional[int] = initializer_range
_a : Dict = batch_norm_eps
_a : Any = batch_norm_momentum
_a : Dict = drop_connect_rate
_a : Tuple = sum(a__ ) * 4
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
_a : Optional[int] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'align'
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
if text_config is None:
_a : Tuple = {}
logger.info('text_config is None. Initializing the AlignTextConfig with default values.' )
if vision_config is None:
_a : Optional[Any] = {}
logger.info('vision_config is None. Initializing the AlignVisionConfig with default values.' )
_a : str = AlignTextConfig(**a__ )
_a : Optional[Any] = AlignVisionConfig(**a__ )
_a : Optional[int] = projection_dim
_a : Any = temperature_init_value
_a : Dict = initializer_range
@classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        '''simple docstring'''
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
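# Usage sketch: compose the two sub-configs into a full ALIGN config.
#     text_config = AlignTextConfig()
#     vision_config = AlignVisionConfig()
#     config = AlignConfig.from_text_vision_configs(text_config, vision_config)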
| 359 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    """debug""": logging.DEBUG,
    """info""": logging.INFO,
    """warning""": logging.WARNING,
    """error""": logging.ERROR,
    """critical""": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level():
    """simple docstring"""
    env_level_str = os.getenv('TRANSFORMERS_VERBOSITY', None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
                f"""has to be one of: { ', '.join(log_levels.keys()) }""")
    return _default_log_level


def _get_library_name() -> str:
    """simple docstring"""
    return __name__.split('.')[0]


def _get_library_root_logger() -> logging.Logger:
    """simple docstring"""
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    """simple docstring"""
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    """simple docstring"""
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    """simple docstring"""
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """simple docstring"""
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """simple docstring"""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """simple docstring"""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """simple docstring"""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """simple docstring"""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """simple docstring"""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """simple docstring"""
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    """simple docstring"""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """simple docstring"""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    """simple docstring"""
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """simple docstring"""
    _configure_library_root_logger()
    assert handler is not None and handler in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    """simple docstring"""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """simple docstring"""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format() -> None:
    """simple docstring"""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s')
        handler.setFormatter(formatter)


def reset_format() -> None:
    """simple docstring"""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    """simple docstring"""
    no_advisory_warnings = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS', False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """simple docstring"""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Any ,*_a : Tuple ,**_a : int ): # pylint: disable=unused-argument
'''simple docstring'''
_a : int = args[0] if args else None
def __iter__( self : str ):
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self : List[Any] ,_a : int ):
'''simple docstring'''
def empty_fn(*_a : Optional[Any] ,**_a : Any ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : List[str] ):
'''simple docstring'''
return self
def __exit__( self : List[str] ,_a : str ,_a : List[Any] ,_a : str ):
'''simple docstring'''
return
class UpperCAmelCase__ :
"""simple docstring"""
def __call__( self : Union[str, Any] ,*_a : Tuple ,**_a : Tuple ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm(*_a ,**_a )
else:
return EmptyTqdm(*_a ,**_a )
def __lowercase ( self : str ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
_a : Any = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_a ,**_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__lowerCAmelCase = _tqdm_cls()
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
_a : str = True
hf_hub_utils.enable_progress_bars()
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
_a : Dict = False
hf_hub_utils.disable_progress_bars()
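# --- editor's sketch, not part of the original file: a minimal standalone
# version of the logging facade above (one library root logger, a single
# stderr handler, and verbosity setters). All names below are illustrative,
# not taken from the library itself. ---
import logging as _sketch_logging

_sketch_root = _sketch_logging.getLogger('mylib' )
_sketch_root.addHandler(_sketch_logging.StreamHandler() ) # writes to sys.stderr
_sketch_root.setLevel(_sketch_logging.INFO ) # what set_verbosity(INFO) amounts to
_sketch_logging.getLogger('mylib.submodule' ).info('visible at INFO and below' )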
| 5 | 0 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( SchedulerCommonTest ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = (UnCLIPScheduler,)
def __lowercase ( self : Optional[int] ,**_a : Union[str, Any] ):
'''simple docstring'''
_a : int = {
'''num_train_timesteps''': 1000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**__A )
return config
def __lowercase ( self : Tuple ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__A )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__A )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__A )
def __lowercase ( self : Dict ):
'''simple docstring'''
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__A )
def __lowercase ( self : int ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__A )
def __lowercase ( self : int ):
'''simple docstring'''
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__A ,prev_timestep=__A )
def __lowercase ( self : int ):
'''simple docstring'''
_a : List[Any] = self.scheduler_classes[0]
_a : List[Any] = self.get_scheduler_config(variance_type='fixed_small_log' )
_a : List[Any] = scheduler_class(**__A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1E-5
def __lowercase ( self : int ):
'''simple docstring'''
_a : Union[str, Any] = self.scheduler_classes[0]
_a : List[Any] = self.get_scheduler_config(variance_type='learned_range' )
_a : str = scheduler_class(**__A )
_a : int = 0.5
assert scheduler._get_variance(1 ,predicted_variance=__A ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(487 ,predicted_variance=__A ) - -5.799_8052 < 1E-5
assert scheduler._get_variance(999 ,predicted_variance=__A ) - -0.001_0011 < 1E-5
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : str = self.scheduler_classes[0]
_a : List[Any] = self.get_scheduler_config()
_a : Optional[int] = scheduler_class(**__A )
_a : int = scheduler.timesteps
_a : Tuple = self.dummy_model()
_a : int = self.dummy_sample_deter
_a : Optional[Any] = torch.manual_seed(0 )
for i, t in enumerate(__A ):
# 1. predict noise residual
_a : Optional[int] = model(__A ,__A )
# 2. predict previous mean of sample x_t-1
_a : Optional[int] = scheduler.step(__A ,__A ,__A ,generator=__A ).prev_sample
_a : List[Any] = pred_prev_sample
_a : int = torch.sum(torch.abs(__A ) )
_a : Tuple = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : int = self.scheduler_classes[0]
_a : Tuple = self.get_scheduler_config()
_a : Union[str, Any] = scheduler_class(**__A )
scheduler.set_timesteps(25 )
_a : Dict = scheduler.timesteps
_a : int = self.dummy_model()
_a : Optional[int] = self.dummy_sample_deter
_a : Any = torch.manual_seed(0 )
for i, t in enumerate(__A ):
# 1. predict noise residual
_a : str = model(__A ,__A )
if i + 1 == timesteps.shape[0]:
_a : Optional[int] = None
else:
_a : List[str] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_a : Any = scheduler.step(
__A ,__A ,__A ,prev_timestep=__A ,generator=__A ).prev_sample
_a : List[Any] = pred_prev_sample
_a : Tuple = torch.sum(torch.abs(__A ) )
_a : Optional[int] = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : List[Any] ):
'''simple docstring'''
pass
| 360 |
'''simple docstring'''
def UpperCAmelCase_ (__a : list[int] , __a : list[int] ):
"""simple docstring"""
if not len(__a ) == len(__a ) == 3:
raise ValueError('Please enter a valid equation.' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
_a, _a, _a : Tuple = equationa
_a, _a, _a : str = equationa
# Calculate the determinants of the matrices
_a : Union[str, Any] = aa * ba - aa * ba
_a : List[Any] = ca * ba - ca * ba
_a : List[Any] = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
_a : int = determinant_x / determinant
_a : List[str] = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
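# editor's worked example (inputs assumed, not from the original file): apply
# the determinant formulas above to the system 2x + 3y = 6 and x - y = 1.
a1, b1, c1 = 2, 3, 6
a2, b2, c2 = 1, -1, 1
det = a1 * b2 - a2 * b1 # -5, so the system has a unique solution
det_x = c1 * b2 - c2 * b1 # -9
det_y = a1 * c2 - a2 * c1 # -4
x, y = det_x / det, det_y / det
assert (x, y) == (1.8, 0.8)
assert abs(2 * x + 3 * y - 6) < 1e-9 and abs(x - y - 1) < 1e-9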
| 5 | 0 |
'''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
__lowerCAmelCase = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
__lowerCAmelCase = """main"""
# Default branch name
__lowerCAmelCase = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"""
# One particular commit (not the top of `main`)
__lowerCAmelCase = """aaaaaaa"""
# This commit does not exist, so we should 404.
__lowerCAmelCase = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"""
# Sha-1 of config.json on the top of `main`, for checking purposes
__lowerCAmelCase = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"""
@contextlib.contextmanager
def UpperCAmelCase_ ():
"""simple docstring"""
print('Welcome!' )
yield
print('Bye!' )
@contextlib.contextmanager
def UpperCAmelCase_ ():
"""simple docstring"""
print('Bonjour!' )
yield
print('Au revoir!' )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : List[str] ):
'''simple docstring'''
assert transformers.__spec__ is not None
assert importlib.util.find_spec('transformers' ) is not None
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
def __lowercase ( self : List[Any] ,_a : Optional[Any] ):
'''simple docstring'''
with ContextManagers([] ):
print('Transformers are awesome!' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() ,'Transformers are awesome!\n' )
@unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
def __lowercase ( self : Any ,_a : Tuple ):
'''simple docstring'''
with ContextManagers([context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() ,'Welcome!\nTransformers are awesome!\nBye!\n' )
@unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
def __lowercase ( self : Dict ,_a : Union[str, Any] ):
'''simple docstring'''
with ContextManagers([context_fr(), context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() ,'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n' )
@require_torch
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
self.assertEqual(find_labels(__a ) ,['labels'] )
self.assertEqual(find_labels(__a ) ,['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(__a ) ,['start_positions', 'end_positions'] )
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(__a ) ,['labels'] )
@require_tf
def __lowercase ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(find_labels(__a ) ,['labels'] )
self.assertEqual(find_labels(__a ) ,['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(__a ) ,['start_positions', 'end_positions'] )
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(__a ) ,['labels'] )
@require_flax
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
self.assertEqual(find_labels(__a ) ,[] )
self.assertEqual(find_labels(__a ) ,[] )
self.assertEqual(find_labels(__a ) ,[] )
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(__a ) ,[] )
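# editor's sketch: `ContextManagers` (exercised in the tests above) behaves
# like entering each manager in order and exiting in reverse, which is what
# `contextlib.ExitStack` provides in the standard library. Helper names here
# are illustrative, not from the library.
from contextlib import ExitStack, contextmanager

@contextmanager
def _greet():
    print('Welcome!' )
    yield
    print('Bye!' )

with ExitStack() as _stack:
    _stack.enter_context(_greet() )
    print('Transformers are awesome!' )
# prints: Welcome! / Transformers are awesome! / Bye!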
| 361 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : int ,_a : List[str] ,_a : Optional[Any]=13 ,_a : str=30 ,_a : str=2 ,_a : Union[str, Any]=3 ,_a : Optional[Any]=True ,_a : int=True ,_a : Union[str, Any]=32 ,_a : List[Any]=5 ,_a : Union[str, Any]=4 ,_a : int=37 ,_a : Any="gelu" ,_a : Union[str, Any]=0.1 ,_a : str=0.1 ,_a : List[str]=10 ,_a : Dict=0.02 ,_a : Tuple=None ,):
'''simple docstring'''
_a : Any = parent
_a : int = batch_size
_a : List[Any] = image_size
_a : Optional[int] = patch_size
_a : List[str] = num_channels
_a : Dict = is_training
_a : Dict = use_labels
_a : Optional[Any] = hidden_size
_a : str = num_hidden_layers
_a : Optional[int] = num_attention_heads
_a : Dict = intermediate_size
_a : Union[str, Any] = hidden_act
_a : List[str] = hidden_dropout_prob
_a : Any = attention_probs_dropout_prob
_a : List[str] = type_sequence_label_size
_a : int = initializer_range
_a : List[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_a : Union[str, Any] = (image_size // patch_size) ** 2
_a : Tuple = num_patches + 1
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : str = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : List[str] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return ViTMSNConfig(
            image_size=self.image_size ,
            patch_size=self.patch_size ,
            num_channels=self.num_channels ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            initializer_range=self.initializer_range ,)
def __lowercase ( self : Tuple ,_a : Any ,_a : List[Any] ,_a : int ):
'''simple docstring'''
_a : str = ViTMSNModel(config=_a )
model.to(_a )
model.eval()
_a : int = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : List[Any] ,_a : str ,_a : Tuple ,_a : Dict ):
'''simple docstring'''
_a : Tuple = self.type_sequence_label_size
_a : int = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_a : Dict = model(_a ,labels=_a )
        print(f"""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
        print(f"""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_a : int = 1
_a : Optional[Any] = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_a : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = self.prepare_config_and_inputs()
_a, _a, _a : int = config_and_inputs
_a : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__UpperCAmelCase : List[Any] = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : str = False
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : List[str] = ViTMSNModelTester(self )
_a : Optional[int] = ConfigTester(self ,config_class=_a ,has_text_modality=_a ,hidden_size=37 )
def __lowercase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a, _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[Any] = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_a : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a ,nn.Linear ) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a, _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[str] = model_class(_a )
_a : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : List[Any] = [*signature.parameters.keys()]
_a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self : int ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Dict = ViTMSNModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(2 )
_a : List[str] = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(_a )
_a : List[str] = self.default_image_processor
_a : int = prepare_img()
_a : Tuple = image_processor(images=_a ,return_tensors='pt' ).to(_a )
# forward pass
with torch.no_grad():
_a : Optional[int] = model(**_a )
# verify the logits
_a : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_a )
_a : List[Any] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_a ,atol=1E-4 ) )
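# editor's sanity check, derived from the tester defaults above: with
# image_size=30 and patch_size=2 the expected sequence length is the patch
# count plus one [CLS] token.
assert (30 // 2) ** 2 + 1 == 226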
| 5 | 0 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def UpperCAmelCase_ (__a : Tuple , __a : Union[str, Any] ):
"""simple docstring"""
_a : Union[str, Any] = checkpoint
_a : Union[str, Any] = {}
_a : Tuple = vae_state_dict['encoder.conv_in.weight']
_a : Dict = vae_state_dict['encoder.conv_in.bias']
_a : Optional[int] = vae_state_dict['encoder.conv_out.weight']
_a : List[Any] = vae_state_dict['encoder.conv_out.bias']
_a : Tuple = vae_state_dict['encoder.norm_out.weight']
_a : Tuple = vae_state_dict['encoder.norm_out.bias']
_a : Tuple = vae_state_dict['decoder.conv_in.weight']
_a : int = vae_state_dict['decoder.conv_in.bias']
_a : str = vae_state_dict['decoder.conv_out.weight']
_a : str = vae_state_dict['decoder.conv_out.bias']
_a : Any = vae_state_dict['decoder.norm_out.weight']
_a : List[Any] = vae_state_dict['decoder.norm_out.bias']
_a : List[str] = vae_state_dict['quant_conv.weight']
_a : str = vae_state_dict['quant_conv.bias']
_a : Optional[int] = vae_state_dict['post_quant_conv.weight']
_a : Optional[int] = vae_state_dict['post_quant_conv.bias']
# Retrieves the keys for the encoder down blocks only
_a : List[Any] = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} )
_a : List[str] = {
layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(UpperCAmelCase__ )
}
# Retrieves the keys for the decoder up blocks only
_a : Dict = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} )
_a : Any = {
layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(UpperCAmelCase__ )
}
for i in range(UpperCAmelCase__ ):
_a : Any = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
_a : Tuple = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.weight""" )
_a : int = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.bias""" )
_a : Optional[Any] = renew_vae_resnet_paths(UpperCAmelCase__ )
_a : str = {'old': f"""down.{i}.block""", 'new': f"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , additional_replacements=[meta_path] , config=UpperCAmelCase__ )
_a : Optional[int] = [key for key in vae_state_dict if 'encoder.mid.block' in key]
_a : Any = 2
for i in range(1 , num_mid_res_blocks + 1 ):
_a : Dict = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
_a : Dict = renew_vae_resnet_paths(UpperCAmelCase__ )
_a : str = {'old': f"""mid.block_{i}""", 'new': f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , additional_replacements=[meta_path] , config=UpperCAmelCase__ )
_a : List[Any] = [key for key in vae_state_dict if 'encoder.mid.attn' in key]
_a : Union[str, Any] = renew_vae_attention_paths(UpperCAmelCase__ )
_a : int = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , additional_replacements=[meta_path] , config=UpperCAmelCase__ )
conv_attn_to_linear(UpperCAmelCase__ )
for i in range(UpperCAmelCase__ ):
_a : Union[str, Any] = num_up_blocks - 1 - i
_a : Dict = [
key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
]
if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
_a : str = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.weight"""
]
_a : str = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.bias"""
]
_a : Any = renew_vae_resnet_paths(UpperCAmelCase__ )
_a : List[Any] = {'old': f"""up.{block_id}.block""", 'new': f"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , additional_replacements=[meta_path] , config=UpperCAmelCase__ )
_a : List[Any] = [key for key in vae_state_dict if 'decoder.mid.block' in key]
_a : List[Any] = 2
for i in range(1 , num_mid_res_blocks + 1 ):
_a : Tuple = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
_a : Tuple = renew_vae_resnet_paths(UpperCAmelCase__ )
_a : Union[str, Any] = {'old': f"""mid.block_{i}""", 'new': f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , additional_replacements=[meta_path] , config=UpperCAmelCase__ )
_a : List[Any] = [key for key in vae_state_dict if 'decoder.mid.attn' in key]
_a : Any = renew_vae_attention_paths(UpperCAmelCase__ )
_a : Optional[Any] = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , additional_replacements=[meta_path] , config=UpperCAmelCase__ )
conv_attn_to_linear(UpperCAmelCase__ )
return new_checkpoint
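# editor's sketch of the renaming performed above (illustrative key, not taken
# from a real checkpoint): `assign_to_checkpoint` rewrites old-layout prefixes
# such as "down.{i}.block" into the diffusers layout "down_blocks.{i}.resnets".
_meta = {'old': 'down.0.block', 'new': 'down_blocks.0.resnets'}
_key = 'encoder.down.0.block.1.conv1.weight'
assert _key.replace(_meta['old'] , _meta['new'] ) == 'encoder.down_blocks.0.resnets.1.conv1.weight'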
def UpperCAmelCase_ (__a : int , __a : Dict , ):
"""simple docstring"""
_a : Union[str, Any] = requests.get(
        'https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' )
_a : Union[str, Any] = io.BytesIO(r.content )
_a : str = OmegaConf.load(UpperCAmelCase__ )
_a : Optional[int] = 5_1_2
_a : Union[str, Any] = 'cuda' if torch.cuda.is_available() else 'cpu'
if checkpoint_path.endswith('safetensors' ):
from safetensors import safe_open
_a : Optional[Any] = {}
with safe_open(UpperCAmelCase__ , framework='pt' , device='cpu' ) as f:
for key in f.keys():
_a : Dict = f.get_tensor(UpperCAmelCase__ )
else:
_a : str = torch.load(UpperCAmelCase__ , map_location=UpperCAmelCase__ )['state_dict']
# Convert the VAE model.
_a : Dict = create_vae_diffusers_config(UpperCAmelCase__ , image_size=UpperCAmelCase__ )
_a : str = custom_convert_ldm_vae_checkpoint(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Tuple = AutoencoderKL(**UpperCAmelCase__ )
vae.load_state_dict(UpperCAmelCase__ )
vae.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
__lowerCAmelCase = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 362 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def UpperCAmelCase_ (__a : str = "https://www.worldometers.info/coronavirus" ):
"""simple docstring"""
_a : List[str] = BeautifulSoup(requests.get(__a ).text , 'html.parser' )
_a : Dict = soup.findAll('h1' )
_a : Union[str, Any] = soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(__a , __a )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
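# editor's offline sketch of the scraping pattern above (static markup, no
# network access; requires beautifulsoup4, imported here under its real name):
from bs4 import BeautifulSoup as _BS

_html = "<h1>Cases</h1><div class='maincounter-number'>42</div>"
_soup = _BS(_html , 'html.parser' )
_keys = _soup.findAll('h1' )
_values = _soup.findAll('div' , {'class': 'maincounter-number'} )
assert {k.text.strip(): v.text.strip() for k, v in zip(_keys , _values )} == {'Cases': '42'}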
| 5 | 0 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Any ,_a : str ):
'''simple docstring'''
_a : List[str] = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
_a : Dict = len(_A ) - 1
def __lowercase ( self : List[str] ,_a : Any ):
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_a : Union[str, Any] = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree ,_A ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(_A ) ,5 ) == 1
return output_values
def __lowercase ( self : Tuple ,_a : Tuple ):
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_a : int = self.basis_function(_A )
_a : List[str] = 0.0
_a : Dict = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def __lowercase ( self : Dict ,_a : Any = 0.01 ):
'''simple docstring'''
from matplotlib import pyplot as plt # type: ignore
_a : Dict = [] # x coordinates of points to plot
_a : Optional[Any] = [] # y coordinates of points to plot
_a : Tuple = 0.0
while t <= 1:
_a : List[Any] = self.bezier_curve_function(_A )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
_a : Any = [i[0] for i in self.list_of_points]
_a : Union[str, Any] = [i[1] for i in self.list_of_points]
plt.plot(
_A ,_A ,color='blue' ,label='Curve of Degree ' + str(self.degree ) ,)
plt.scatter(_A ,_A ,color='red' ,label='Control Points' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
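# editor's sanity check for the degree-1 curve through (1, 2) and (3, 5): the
# Bernstein basis at t = 0.5 is [0.5, 0.5], so the curve passes through the
# midpoint of the two control points.
_t = 0.5
_basis = [1 - _t, _t] # comb(1, 0) = comb(1, 1) = 1
_point = (_basis[0] * 1 + _basis[1] * 3, _basis[0] * 2 + _basis[1] * 5)
assert _point == (2.0, 3.5)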
| 363 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
__lowerCAmelCase = """docs/source/en/_toctree.yml"""
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Any = defaultdict(__a )
for doc in model_doc:
counts[doc["local"]] += 1
_a : List[str] = [key for key, value in counts.items() if value > 1]
_a : str = []
for duplicate_key in duplicates:
_a : Union[str, Any] = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(__a ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
return sorted(__a , key=lambda __a : s["title"].lower() )
def UpperCAmelCase_ (__a : Optional[int]=False ):
"""simple docstring"""
with open(__a , encoding='utf-8' ) as f:
_a : Tuple = yaml.safe_load(f.read() )
# Get to the API doc
_a : Union[str, Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_a : Union[str, Any] = content[api_idx]['sections']
# Then to the model doc
_a : List[str] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
_a : List[str] = api_doc[model_idx]['sections']
_a : List[Any] = [(idx, section) for idx, section in enumerate(__a ) if 'sections' in section]
_a : Tuple = False
for idx, modality_doc in modalities_docs:
_a : List[Any] = modality_doc['sections']
_a : Any = clean_model_doc_toc(__a )
if old_modality_doc != new_modality_doc:
_a : Union[str, Any] = True
if overwrite:
_a : str = new_modality_doc
if diff:
if overwrite:
_a : Dict = model_doc
_a : Dict = api_doc
with open(__a , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(__a , allow_unicode=__a ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
__lowerCAmelCase = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
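# editor's sketch of the duplicate detection inside clean_model_doc_toc above
# (toy toctree entries, not real docs):
from collections import defaultdict as _defaultdict

_counts = _defaultdict(int )
for _doc in [{'local': 'bert'}, {'local': 'bert'}, {'local': 'gpt2'}]:
    _counts[_doc['local']] += 1
assert [k for k, v in _counts.items() if v > 1] == ['bert']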
| 5 | 0 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def UpperCAmelCase_ (__a : Dict ):
"""simple docstring"""
_a : Optional[Any] = int(snake_case__ )
_a : int = t // 3_6_0_0, (t // 6_0) % 6_0, t % 6_0
return f"""{h}:{m:02d}:{s:02d}""" if h != 0 else f"""{m:02d}:{s:02d}"""
def UpperCAmelCase_ (__a : Union[str, Any] , __a : List[Any] , __a : int , __a : Optional[int] , __a : List[str]=3_0_0 ):
"""simple docstring"""
return f"""
<div>
{prefix}
<progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
{label}
</div>
"""
def UpperCAmelCase_ (__a : Any ):
"""simple docstring"""
_a : Dict = '<table border="1" class="dataframe">\n'
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f""" <th>{i}</th>\n"""
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
_a : List[str] = f"""{elt:.6f}""" if isinstance(snake_case__ , snake_case__ ) else str(snake_case__ )
html_code += f""" <td>{elt}</td>\n"""
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class UpperCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : str = 5
__UpperCAmelCase : Dict = 0.2
def __init__( self : Optional[Any] ,_a : Union[str, Any] ,_a : int = None ,_a : Optional[Any] = True ,_a : Optional[int] = None ,_a : Union[str, Any] = 300 ,):
'''simple docstring'''
_a : Dict = total
_a : Union[str, Any] = '' if prefix is None else prefix
_a : Optional[int] = leave
_a : Any = parent
_a : List[Any] = width
_a : int = None
_a : int = None
_a : Dict = None
def __lowercase ( self : str ,_a : int ,_a : Tuple = False ,_a : Tuple = None ):
'''simple docstring'''
_a : Optional[int] = value
if comment is not None:
_a : Any = comment
if self.last_value is None:
_a : str = time.time()
_a : str = value
_a : List[str] = None
_a : Optional[int] = self.warmup
_a : Optional[Any] = 1
self.update_bar(SCREAMING_SNAKE_CASE_ )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for ,self.total ):
if self.first_calls > 0:
self.first_calls -= 1
_a : List[str] = time.time()
_a : str = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
_a : Optional[int] = self.elapsed_time / (value - self.start_value)
else:
_a : List[Any] = None
if value >= self.total:
_a : Any = self.total
_a : Dict = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
_a : List[Any] = self.average_time_per_item * (self.total - value)
self.update_bar(SCREAMING_SNAKE_CASE_ )
_a : int = value
_a : Tuple = current_time
if self.average_time_per_item is None:
_a : int = 1
else:
_a : Optional[Any] = max(int(self.update_every / self.average_time_per_item ) ,1 )
def __lowercase ( self : Any ,_a : Tuple ,_a : Any=None ):
'''simple docstring'''
_a : List[Any] = ' ' * (len(str(self.total ) ) - len(str(SCREAMING_SNAKE_CASE_ ) )) + str(SCREAMING_SNAKE_CASE_ )
if self.elapsed_time is None:
_a : Optional[int] = F"""[{spaced_value}/{self.total} : < :"""
elif self.predicted_remaining is None:
_a : int = F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"""
else:
_a : Any = (
F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"""
F""" {format_time(self.predicted_remaining )}"""
)
self.label += F""", {1/self.average_time_per_item:.2f} it/s"""
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F""", {self.comment}]"""
self.display()
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : int = html_progress_bar(self.value ,self.total ,self.prefix ,self.label ,self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
_a : Optional[int] = disp.display(disp.HTML(self.html_code ) ,display_id=SCREAMING_SNAKE_CASE_ )
else:
self.output.update(disp.HTML(self.html_code ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('' ) )
class UpperCAmelCase__ ( a__ ):
"""simple docstring"""
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Optional[int]=None ):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE_ )
_a : str = None if column_names is None else [column_names]
_a : Any = None
def __lowercase ( self : str ):
'''simple docstring'''
_a : Optional[Any] = html_progress_bar(self.value ,self.total ,self.prefix ,self.label ,self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
_a : Union[str, Any] = disp.display(disp.HTML(self.html_code ) ,display_id=SCREAMING_SNAKE_CASE_ )
else:
self.output.update(disp.HTML(self.html_code ) )
def __lowercase ( self : Any ,_a : Tuple ):
'''simple docstring'''
if self.inner_table is None:
_a : List[str] = [list(values.keys() ), list(values.values() )]
else:
_a : int = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(SCREAMING_SNAKE_CASE_ )
_a : List[str] = columns
self.inner_table.append([values[c] for c in columns] )
def __lowercase ( self : Any ,_a : Any ,_a : List[str]=None ,_a : Optional[Any]=300 ):
'''simple docstring'''
_a : Any = NotebookProgressBar(SCREAMING_SNAKE_CASE_ ,prefix=SCREAMING_SNAKE_CASE_ ,parent=self ,width=SCREAMING_SNAKE_CASE_ )
return self.child_bar
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Optional[Any] = None
self.display()
class UpperCAmelCase__ ( a__ ):
"""simple docstring"""
def __init__( self : Dict ):
'''simple docstring'''
_a : List[Any] = None
_a : int = None
_a : Union[str, Any] = False
def __lowercase ( self : List[str] ,_a : List[Any] ,_a : Optional[Any] ,_a : Any ,**_a : int ):
'''simple docstring'''
_a : Dict = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
_a : Optional[int] = 0
_a : Optional[int] = 0
_a : str = [self.first_column] + ['Training Loss']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('Validation Loss' )
_a : List[Any] = NotebookTrainingTracker(state.max_steps ,SCREAMING_SNAKE_CASE_ )
def __lowercase ( self : Optional[Any] ,_a : Any ,_a : Any ,_a : Dict ,**_a : Optional[int] ):
'''simple docstring'''
_a : List[str] = int(state.epoch ) if int(state.epoch ) == state.epoch else F"""{state.epoch:.2f}"""
self.training_tracker.update(
state.global_step + 1 ,comment=F"""Epoch {epoch}/{state.num_train_epochs}""" ,force_update=self._force_next_update ,)
_a : Optional[Any] = False
def __lowercase ( self : List[str] ,_a : int ,_a : Dict ,_a : Optional[Any] ,_a : List[str]=None ,**_a : Tuple ):
'''simple docstring'''
if not has_length(SCREAMING_SNAKE_CASE_ ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
_a : Tuple = self.training_tracker.add_child(len(SCREAMING_SNAKE_CASE_ ) )
else:
_a : Any = NotebookProgressBar(len(SCREAMING_SNAKE_CASE_ ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def __lowercase ( self : Dict ,_a : Tuple ,_a : Any ,_a : List[str] ,**_a : Dict ):
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
_a : Optional[Any] = None
def __lowercase ( self : List[Any] ,_a : Tuple ,_a : Any ,_a : List[str] ,_a : str=None ,**_a : str ):
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
_a : int = {'Training Loss': logs['loss']}
            # First column is necessarily Step since we're not in epoch eval strategy
_a : List[Any] = state.global_step
self.training_tracker.write_line(SCREAMING_SNAKE_CASE_ )
def __lowercase ( self : Optional[int] ,_a : Dict ,_a : Any ,_a : Union[str, Any] ,_a : List[str]=None ,**_a : Optional[int] ):
'''simple docstring'''
if self.training_tracker is not None:
_a : List[str] = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
for log in reversed(state.log_history ):
if "loss" in log:
_a : List[Any] = log['loss']
break
if self.first_column == "Epoch":
_a : Any = int(state.epoch )
else:
_a : Optional[int] = state.global_step
_a : List[str] = 'eval'
for k in metrics:
if k.endswith('_loss' ):
_a : int = re.sub(R'\_loss$' ,'' ,SCREAMING_SNAKE_CASE_ )
_a : Union[str, Any] = metrics.pop('total_flos' ,SCREAMING_SNAKE_CASE_ )
_a : Any = metrics.pop('epoch' ,SCREAMING_SNAKE_CASE_ )
_a : int = metrics.pop(F"""{metric_key_prefix}_runtime""" ,SCREAMING_SNAKE_CASE_ )
_a : List[Any] = metrics.pop(F"""{metric_key_prefix}_samples_per_second""" ,SCREAMING_SNAKE_CASE_ )
_a : List[str] = metrics.pop(F"""{metric_key_prefix}_steps_per_second""" ,SCREAMING_SNAKE_CASE_ )
_a : str = metrics.pop(F"""{metric_key_prefix}_jit_compilation_time""" ,SCREAMING_SNAKE_CASE_ )
for k, v in metrics.items():
if k == F"""{metric_key_prefix}_loss""":
_a : Tuple = v
else:
_a : List[str] = k.split('_' )
_a : Optional[Any] = ' '.join([part.capitalize() for part in splits[1:]] )
_a : Optional[int] = v
self.training_tracker.write_line(SCREAMING_SNAKE_CASE_ )
self.training_tracker.remove_child()
_a : Any = None
# Evaluation takes a long time so we should force the next update.
_a : Optional[int] = True
def __lowercase ( self : int ,_a : Optional[int] ,_a : Any ,_a : Any ,**_a : Union[str, Any] ):
'''simple docstring'''
self.training_tracker.update(
state.global_step ,comment=F"""Epoch {int(state.epoch )}/{state.num_train_epochs}""" ,force_update=SCREAMING_SNAKE_CASE_ )
_a : Dict = None
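# editor's sketch of the ETA arithmetic used by the progress bar above: after
# `value` items in `elapsed` seconds, the predicted remainder is
# (total - value) * elapsed / value. Numbers here are illustrative.
_total, _value, _elapsed = 10, 4, 2.0
_avg = _elapsed / _value
assert (_total - _value) * _avg == 3.0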
| 364 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
if len(__a ) != 3_2:
raise ValueError('Input must be of length 32' )
_a : Any = b''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
_a : List[str] = format(__a , '08x' )[-8:]
_a : str = b''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
_a : List[Any] = b''
for char in message:
bit_string += format(__a , '08b' ).encode('utf-8' )
_a : int = format(len(__a ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__a ) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
return bit_string
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
if len(__a ) % 5_1_2 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(__a ) , 5_1_2 ):
_a : List[Any] = bit_string[pos : pos + 5_1_2]
_a : str = []
for i in range(0 , 5_1_2 , 3_2 ):
block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
yield block_words
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
_a : List[str] = format(__a , '032b' )
_a : int = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__a , 2 )
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
return (a + b) % 2**3_2
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
_a : str = preprocess(__a )
_a : Optional[int] = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
# Starting states
_a : int = 0x67_45_23_01
_a : Union[str, Any] = 0xEF_CD_AB_89
_a : str = 0x98_BA_DC_FE
_a : List[Any] = 0x10_32_54_76
_a : Optional[int] = [
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__a ):
_a : Union[str, Any] = aa
_a : List[Any] = ba
_a : List[Any] = ca
_a : Dict = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_a : Optional[int] = d ^ (b & (c ^ d))
_a : Optional[Any] = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_a : Optional[Any] = c ^ (d & (b ^ c))
_a : Dict = (5 * i + 1) % 1_6
elif i <= 4_7:
_a : Optional[Any] = b ^ c ^ d
_a : Dict = (3 * i + 5) % 1_6
else:
_a : int = c ^ (b | not_aa(__a ))
_a : List[str] = (7 * i) % 1_6
_a : Optional[int] = (f + a + added_consts[i] + block_words[g]) % 2**3_2
_a : Union[str, Any] = d
_a : Tuple = c
_a : Optional[int] = b
_a : Union[str, Any] = sum_aa(__a , left_rotate_aa(__a , shift_amounts[i] ) )
# Add hashed chunk to running total
_a : Any = sum_aa(__a , __a )
_a : Dict = sum_aa(__a , __a )
_a : Union[str, Any] = sum_aa(__a , __a )
_a : str = sum_aa(__a , __a )
_a : Optional[Any] = reformat_hex(__a ) + reformat_hex(__a ) + reformat_hex(__a ) + reformat_hex(__a )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
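# editor's cross-check: an MD5 digest such as the one produced above should
# agree with the reference implementation in the standard library, e.g.:
import hashlib
assert hashlib.md5(b'hello' ).hexdigest() == '5d41402abc4b2a76b9719d911017c592'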
| 5 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class UpperCAmelCase__ ( BaseOutput ):
"""simple docstring"""
__UpperCAmelCase : Dict = 42
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : str ,_a : Any=3 ,_a : List[Any]=3 ,_a : Union[str, Any]=("DownEncoderBlock2D",) ,_a : List[Any]=(64,) ,_a : Optional[Any]=2 ,_a : Dict=32 ,_a : List[Any]="silu" ,_a : Union[str, Any]=True ,):
'''simple docstring'''
super().__init__()
_a : Optional[Any] = layers_per_block
_a : Tuple = torch.nn.Convad(
_a ,block_out_channels[0] ,kernel_size=3 ,stride=1 ,padding=1 ,)
_a : Tuple = None
_a : Any = nn.ModuleList([] )
# down
_a : Union[str, Any] = block_out_channels[0]
for i, down_block_type in enumerate(_a ):
_a : Any = output_channel
_a : str = block_out_channels[i]
_a : Tuple = i == len(_a ) - 1
_a : List[str] = get_down_block(
                _a ,
                num_layers=self.layers_per_block ,
                in_channels=_a ,
                out_channels=_a ,
                add_downsample=not is_final_block ,
                resnet_eps=1E-6 ,
                downsample_padding=0 ,
                resnet_act_fn=_a ,
                resnet_groups=_a ,
                attention_head_dim=_a ,
                temb_channels=_a ,)
self.down_blocks.append(_a )
# mid
_a : List[Any] = UNetMidBlockaD(
            in_channels=block_out_channels[-1] ,
            resnet_eps=1E-6 ,
            resnet_act_fn=_a ,
            output_scale_factor=1 ,
            resnet_time_scale_shift='default' ,
            attention_head_dim=block_out_channels[-1] ,
            resnet_groups=_a ,
            temb_channels=_a ,)
# out
_a : Dict = nn.GroupNorm(num_channels=block_out_channels[-1] ,num_groups=_a ,eps=1E-6 )
_a : str = nn.SiLU()
_a : Dict = 2 * out_channels if double_z else out_channels
_a : Tuple = nn.Convad(block_out_channels[-1] ,_a ,3 ,padding=1 )
_a : int = False
def __lowercase ( self : Dict ,_a : Any ):
'''simple docstring'''
_a : Union[str, Any] = x
_a : Any = self.conv_in(_a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(_a : Tuple ):
def custom_forward(*_a : List[Any] ):
return module(*_a )
return custom_forward
# down
if is_torch_version('>=' ,'1.11.0' ):
for down_block in self.down_blocks:
_a : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(_a ) ,_a ,use_reentrant=_a )
# middle
_a : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,_a ,use_reentrant=_a )
else:
for down_block in self.down_blocks:
_a : Union[str, Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(_a ) ,_a )
# middle
_a : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) ,_a )
else:
# down
for down_block in self.down_blocks:
_a : List[Any] = down_block(_a )
# middle
_a : Optional[int] = self.mid_block(_a )
# post-process
_a : Optional[int] = self.conv_norm_out(_a )
_a : Dict = self.conv_act(_a )
_a : Union[str, Any] = self.conv_out(_a )
return sample
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] ,_a : Dict=3 ,_a : Optional[int]=3 ,_a : Any=("UpDecoderBlock2D",) ,_a : List[Any]=(64,) ,_a : Dict=2 ,_a : Dict=32 ,_a : Tuple="silu" ,_a : str="group" ,):
'''simple docstring'''
super().__init__()
_a : Any = layers_per_block
_a : List[Any] = nn.Convad(
_a ,block_out_channels[-1] ,kernel_size=3 ,stride=1 ,padding=1 ,)
_a : Optional[int] = None
_a : Any = nn.ModuleList([] )
_a : List[Any] = in_channels if norm_type == 'spatial' else None
# mid
_a : int = UNetMidBlockaD(
            in_channels=block_out_channels[-1] ,
            resnet_eps=1E-6 ,
            resnet_act_fn=_a ,
            output_scale_factor=1 ,
            resnet_time_scale_shift='default' if norm_type == 'group' else norm_type ,
            attention_head_dim=block_out_channels[-1] ,
            resnet_groups=_a ,
            temb_channels=_a ,)
# up
_a : int = list(reversed(_a ) )
_a : List[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(_a ):
_a : str = output_channel
_a : List[str] = reversed_block_out_channels[i]
_a : Any = i == len(_a ) - 1
_a : str = get_up_block(
                _a ,
                num_layers=self.layers_per_block + 1 ,
                in_channels=_a ,
                out_channels=_a ,
                prev_output_channel=_a ,
                add_upsample=not is_final_block ,
                resnet_eps=1E-6 ,
                resnet_act_fn=_a ,
                resnet_groups=_a ,
                attention_head_dim=_a ,
                temb_channels=_a ,
                resnet_time_scale_shift=_a ,)
self.up_blocks.append(_a )
_a : Optional[Any] = output_channel
# out
if norm_type == "spatial":
_a : Union[str, Any] = SpatialNorm(block_out_channels[0] ,_a )
else:
_a : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[0] ,num_groups=_a ,eps=1E-6 )
_a : Tuple = nn.SiLU()
_a : Union[str, Any] = nn.Convad(block_out_channels[0] ,_a ,3 ,padding=1 )
_a : Optional[Any] = False
def __lowercase ( self : Union[str, Any] ,_a : List[str] ,_a : Union[str, Any]=None ):
'''simple docstring'''
_a : str = z
_a : Optional[Any] = self.conv_in(_a )
_a : Union[str, Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(_a : Dict ):
def custom_forward(*_a : List[str] ):
return module(*_a )
return custom_forward
if is_torch_version('>=' ,'1.11.0' ):
# middle
_a : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,_a ,_a ,use_reentrant=_a )
_a : Union[str, Any] = sample.to(_a )
# up
for up_block in self.up_blocks:
_a : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(_a ) ,_a ,_a ,use_reentrant=_a )
else:
# middle
_a : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,_a ,_a )
_a : Tuple = sample.to(_a )
# up
for up_block in self.up_blocks:
_a : str = torch.utils.checkpoint.checkpoint(create_custom_forward(_a ) ,_a ,_a )
else:
# middle
_a : Dict = self.mid_block(_a ,_a )
_a : Union[str, Any] = sample.to(_a )
# up
for up_block in self.up_blocks:
_a : Tuple = up_block(_a ,_a )
# post-process
if latent_embeds is None:
_a : Dict = self.conv_norm_out(_a )
else:
_a : Dict = self.conv_norm_out(_a ,_a )
_a : Dict = self.conv_act(_a )
_a : Dict = self.conv_out(_a )
return sample
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : int ,_a : int ,_a : List[str] ,_a : str ,_a : int=None ,_a : Union[str, Any]="random" ,_a : Optional[int]=False ,_a : Any=True ):
'''simple docstring'''
super().__init__()
_a : List[Any] = n_e
_a : Union[str, Any] = vq_embed_dim
_a : Tuple = beta
_a : List[str] = legacy
_a : Any = nn.Embedding(self.n_e ,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e ,1.0 / self.n_e )
_a : Any = remap
if self.remap is not None:
self.register_buffer('used' ,torch.tensor(np.load(self.remap ) ) )
_a : int = self.used.shape[0]
_a : Optional[int] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_a : int = self.re_embed
_a : Dict = self.re_embed + 1
print(
F"""Remapping {self.n_e} indices to {self.re_embed} indices. """
F"""Using {self.unknown_index} for unknown indices.""" )
else:
_a : int = n_e
_a : Optional[int] = sane_index_shape
def __lowercase ( self : Dict ,_a : int ):
'''simple docstring'''
_a : Any = inds.shape
assert len(_a ) > 1
_a : Any = inds.reshape(ishape[0] ,-1 )
_a : List[str] = self.used.to(_a )
_a : Optional[int] = (inds[:, :, None] == used[None, None, ...]).long()
_a : Optional[Any] = match.argmax(-1 )
_a : str = match.sum(2 ) < 1
if self.unknown_index == "random":
_a : List[Any] = torch.randint(0 ,self.re_embed ,size=new[unknown].shape ).to(device=new.device )
else:
_a : str = self.unknown_index
return new.reshape(_a )
def __lowercase ( self : Optional[Any] ,_a : Dict ):
'''simple docstring'''
_a : Optional[Any] = inds.shape
assert len(_a ) > 1
_a : int = inds.reshape(ishape[0] ,-1 )
_a : int = self.used.to(_a )
if self.re_embed > self.used.shape[0]: # extra token
_a : Optional[Any] = 0 # simply set to zero
_a : List[str] = torch.gather(used[None, :][inds.shape[0] * [0], :] ,1 ,_a )
return back.reshape(_a )
def __lowercase ( self : Dict ,_a : Tuple ):
'''simple docstring'''
_a : Tuple = z.permute(0 ,2 ,3 ,1 ).contiguous()
_a : Dict = z.view(-1 ,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_a : Any = torch.argmin(torch.cdist(_a ,self.embedding.weight ) ,dim=1 )
_a : List[Any] = self.embedding(_a ).view(z.shape )
_a : Tuple = None
_a : Union[str, Any] = None
# compute loss for embedding
if not self.legacy:
_a : str = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_a : Optional[int] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_a : Dict = z + (z_q - z).detach()
# reshape back to match original input shape
_a : Optional[Any] = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
if self.remap is not None:
_a : Dict = min_encoding_indices.reshape(z.shape[0] ,-1 ) # add batch axis
_a : Tuple = self.remap_to_used(_a )
_a : Dict = min_encoding_indices.reshape(-1 ,1 ) # flatten
if self.sane_index_shape:
_a : str = min_encoding_indices.reshape(z_q.shape[0] ,z_q.shape[2] ,z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __lowercase ( self : str ,_a : Any ,_a : Any ):
'''simple docstring'''
if self.remap is not None:
_a : Optional[int] = indices.reshape(shape[0] ,-1 ) # add batch axis
_a : int = self.unmap_to_all(_a )
_a : List[Any] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_a : Union[str, Any] = self.embedding(_a )
if shape is not None:
_a : Optional[int] = z_q.view(_a )
# reshape back to match original input shape
_a : str = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
return z_q
class UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self : List[Any] ,_a : List[Any] ,_a : List[str]=False ):
'''simple docstring'''
_a : Union[str, Any] = parameters
_a, _a : Union[str, Any] = torch.chunk(_a ,2 ,dim=1 )
_a : Tuple = torch.clamp(self.logvar ,-30.0 ,20.0 )
_a : Dict = deterministic
_a : List[str] = torch.exp(0.5 * self.logvar )
_a : Dict = torch.exp(self.logvar )
if self.deterministic:
_a : Tuple = torch.zeros_like(
self.mean ,device=self.parameters.device ,dtype=self.parameters.dtype )
def __lowercase ( self : Dict ,_a : str = None ):
'''simple docstring'''
_a : Tuple = randn_tensor(
self.mean.shape ,generator=_a ,device=self.parameters.device ,dtype=self.parameters.dtype )
_a : Optional[int] = self.mean + self.std * sample
return x
def __lowercase ( self : int ,_a : Union[str, Any]=None ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean ,2 ) + self.var - 1.0 - self.logvar ,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean ,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar ,dim=[1, 2, 3] ,)
def __lowercase ( self : List[Any] ,_a : int ,_a : List[str]=[1, 2, 3] ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
_a : Tuple = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean ,2 ) / self.var ,dim=_a )
def __lowercase ( self : Any ):
'''simple docstring'''
return self.mean
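# editor's standalone sketch of the reparameterization trick used by the
# diagonal Gaussian class above: drawing x = mean + std * eps keeps the sample
# differentiable w.r.t. mean and logvar. Names below are illustrative.
import torch as _torch_sketch

_mean = _torch_sketch.zeros(2 ,4 ,requires_grad=True )
_logvar = _torch_sketch.zeros(2 ,4 ,requires_grad=True )
_x = _mean + _torch_sketch.exp(0.5 * _logvar ) * _torch_sketch.randn(2 ,4 )
assert _x.shape == (2, 4) and _x.requires_grad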
| 365 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def UpperCAmelCase_ (__a : str , __a : Dict=0.999 , __a : List[str]="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__a : Union[str, Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__a : int ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_a : Tuple = []
for i in range(__a ):
_a : Union[str, Any] = i / num_diffusion_timesteps
_a : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__a ) / alpha_bar_fn(__a ) , __a ) )
return torch.tensor(__a , dtype=torch.floataa )
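# editor's standalone mirror of the cosine schedule above (same formula; the
# helper name is illustrative, not from the library):
import math as _math

def _cosine_betas(n ,max_beta=0.999 ):
    _bar = lambda t: _math.cos((t + 0.008 ) / 1.008 * _math.pi / 2 ) ** 2
    return [min(1 - _bar((i + 1 ) / n ) / _bar(i / n ) ,max_beta ) for i in range(n )]

_betas = _cosine_betas(1000 )
assert len(_betas ) == 1000 and all(0 < b <= 0.999 for b in _betas )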
class UpperCAmelCase__ ( SchedulerMixin , ConfigMixin ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : Dict = 2
@register_to_config
def __init__( self : str ,_a : int = 1000 ,_a : float = 0.0_0085 ,_a : float = 0.012 ,_a : str = "linear" ,_a : Optional[Union[np.ndarray, List[float]]] = None ,_a : str = "epsilon" ,_a : Optional[bool] = False ,_a : Optional[bool] = False ,_a : float = 1.0 ,_a : str = "linspace" ,_a : int = 0 ,):
'''simple docstring'''
if trained_betas is not None:
_a : List[str] = torch.tensor(_a ,dtype=torch.floataa )
elif beta_schedule == "linear":
_a : Tuple = torch.linspace(_a ,_a ,_a ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a : List[str] = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,_a ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a : Dict = betas_for_alpha_bar(_a ,alpha_transform_type='cosine' )
elif beta_schedule == "exp":
_a : Tuple = betas_for_alpha_bar(_a ,alpha_transform_type='exp' )
else:
raise NotImplementedError(F"""{beta_schedule} is not implemented for {self.__class__}""" )
_a : Optional[Any] = 1.0 - self.betas
_a : Optional[int] = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(_a ,_a ,_a )
_a : Optional[int] = use_karras_sigmas
def __lowercase ( self : Any ,_a : Union[str, Any] ,_a : Optional[Any]=None ):
'''simple docstring'''
if schedule_timesteps is None:
_a : List[Any] = self.timesteps
_a : Dict = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_a : int = 1 if len(_a ) > 1 else 0
else:
_a : str = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
_a : str = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowercase ( self : int ,_a : torch.FloatTensor ,_a : Union[float, torch.FloatTensor] ,):
'''simple docstring'''
_a : List[Any] = self.index_for_timestep(_a )
_a : Tuple = self.sigmas[step_index]
_a : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowercase ( self : Any ,_a : int ,_a : Union[str, torch.device] = None ,_a : Optional[int] = None ,):
'''simple docstring'''
_a : Optional[Any] = num_inference_steps
_a : Dict = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_a : Optional[Any] = np.linspace(0 ,num_train_timesteps - 1 ,_a ,dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_a : str = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a : int = (np.arange(0 ,_a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_a : Any = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a : Union[str, Any] = (np.arange(_a ,0 ,-step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_a : Tuple = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_a : Union[str, Any] = np.log(_a )
_a : str = np.interp(_a ,np.arange(0 ,len(_a ) ) ,_a )
if self.config.use_karras_sigmas:
_a : List[Any] = self._convert_to_karras(in_sigmas=_a ,num_inference_steps=self.num_inference_steps )
_a : Dict = np.array([self._sigma_to_t(_a ,_a ) for sigma in sigmas] )
_a : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_a : Union[str, Any] = torch.from_numpy(_a ).to(device=_a )
_a : Any = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
_a : List[Any] = torch.from_numpy(_a )
_a : List[str] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(_a ).startswith('mps' ):
# mps does not support float64
_a : Tuple = timesteps.to(_a ,dtype=torch.floataa )
else:
_a : Dict = timesteps.to(device=_a )
# empty dt and derivative
_a : Tuple = None
_a : Optional[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_a : Union[str, Any] = defaultdict(_a )
def __lowercase ( self : str ,_a : Dict ,_a : Dict ):
'''simple docstring'''
_a : Optional[int] = np.log(_a )
# get distribution
_a : Union[str, Any] = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
_a : List[Any] = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
_a : Tuple = low_idx + 1
_a : Union[str, Any] = log_sigmas[low_idx]
_a : Optional[Any] = log_sigmas[high_idx]
# interpolate sigmas
_a : Optional[Any] = (low - log_sigma) / (low - high)
_a : List[str] = np.clip(_a ,0 ,1 )
# transform interpolation to time range
_a : Union[str, Any] = (1 - w) * low_idx + w * high_idx
_a : List[str] = t.reshape(sigma.shape )
return t
def __lowercase ( self : int ,_a : torch.FloatTensor ,_a : Tuple ):
'''simple docstring'''
_a : float = in_sigmas[-1].item()
_a : float = in_sigmas[0].item()
_a : Tuple = 7.0 # 7.0 is the value used in the paper
_a : str = np.linspace(0 ,1 ,_a )
_a : Optional[Any] = sigma_min ** (1 / rho)
_a : Union[str, Any] = sigma_max ** (1 / rho)
_a : str = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
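# Note (added for clarity): this implements the sigma schedule of Karras et al. (2022),
#   sigma_i = (sigma_max^(1/rho) + (i / (N - 1)) * (sigma_min^(1/rho) - sigma_max^(1/rho)))^rho
# with rho = 7, so `ramp` interpolates linearly in sigma^(1/rho) space from sigma_max
# down to sigma_min over the requested number of inference steps.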
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return self.dt is None
def __lowercase ( self : int ,_a : Union[torch.FloatTensor, np.ndarray] ,_a : Union[float, torch.FloatTensor] ,_a : Union[torch.FloatTensor, np.ndarray] ,_a : bool = True ,):
'''simple docstring'''
_a : Union[str, Any] = self.index_for_timestep(_a )
# advance index counter by 1
_a : Any = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_a : Tuple = self.sigmas[step_index]
_a : int = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
_a : List[str] = self.sigmas[step_index - 1]
_a : List[Any] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_a : Optional[int] = 0
_a : Tuple = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_a : Dict = sigma_hat if self.state_in_first_order else sigma_next
_a : Optional[int] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_a : List[Any] = sigma_hat if self.state_in_first_order else sigma_next
_a : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
_a : Union[str, Any] = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
_a : Optional[int] = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_a : Optional[Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_a : Any = sigma_next - sigma_hat
# store for 2nd order step
_a : int = derivative
_a : List[str] = dt
_a : Union[str, Any] = sample
else:
# 2. 2nd order / Heun's method
_a : Dict = (sample - pred_original_sample) / sigma_next
_a : Tuple = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
_a : Optional[Any] = self.dt
_a : Union[str, Any] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
_a : List[Any] = None
_a : Union[str, Any] = None
_a : Dict = None
_a : str = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def __lowercase ( self : Optional[int] ,_a : torch.FloatTensor ,_a : torch.FloatTensor ,_a : torch.FloatTensor ,):
'''simple docstring'''
_a : str = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
_a : Dict = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
_a : Optional[Any] = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
_a : int = self.timesteps.to(original_samples.device )
_a : Optional[Any] = timesteps.to(original_samples.device )
_a : Any = [self.index_for_timestep(_a ,_a ) for t in timesteps]
_a : Optional[int] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_a : Optional[Any] = sigma.unsqueeze(-1 )
_a : Any = original_samples + noise * sigma
return noisy_samples
def __len__( self : Optional[int] ):
'''simple docstring'''
return self.config.num_train_timesteps
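# Minimal usage sketch (hypothetical; names follow the upstream diffusers
# HeunDiscreteScheduler API, whose bodies match the `__lowercase` methods above).
# Because this is a 2nd-order Heun scheduler, `set_timesteps` duplicates interior
# timesteps via `repeat_interleave(2)` and `step` alternates between a 1st-order Euler
# step and the 2nd-order correction, so the model is called once per entry of
# `scheduler.timesteps`:
#
#   scheduler.set_timesteps(num_inference_steps=25, device="cuda")
#   sample = torch.randn(shape, device="cuda") * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t).sample   # `unet` is an assumed denoising model
#       sample = scheduler.step(noise_pred, t, sample).prev_sample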
| 5 | 0 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__lowerCAmelCase = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCAmelCase__ ( datasets.BuilderConfig ):
"""simple docstring"""
__UpperCAmelCase : int = 1_0000
__UpperCAmelCase : Optional[List[str]] = None
__UpperCAmelCase : Optional[datasets.Features] = None
class UpperCAmelCase__ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
__UpperCAmelCase : Any = ParquetConfig
def __lowercase ( self : str ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def __lowercase ( self : Tuple ,_a : Dict ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_a : List[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__lowercase ,(str, list, tuple) ):
_a : Any = data_files
if isinstance(__lowercase ,__lowercase ):
_a : int = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_a : Tuple = [dl_manager.iter_files(__lowercase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files} )]
_a : Union[str, Any] = []
for split_name, files in data_files.items():
if isinstance(__lowercase ,__lowercase ):
_a : List[str] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_a : Dict = [dl_manager.iter_files(__lowercase ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(__lowercase ):
with open(__lowercase ,'rb' ) as f:
_a : int = datasets.Features.from_arrow_schema(pq.read_schema(__lowercase ) )
break
splits.append(datasets.SplitGenerator(name=__lowercase ,gen_kwargs={'files': files} ) )
return splits
def __lowercase ( self : int ,_a : Tuple ):
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_a : Tuple = table_cast(__lowercase ,self.info.features.arrow_schema )
return pa_table
def __lowercase ( self : List[str] ,_a : Tuple ):
'''simple docstring'''
_a : Any = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(__lowercase ) ):
with open(__lowercase ,'rb' ) as f:
_a : Dict = pq.ParquetFile(__lowercase )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size ,columns=self.config.columns ) ):
_a : Tuple = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"""{file_idx}_{batch_idx}""", self._cast_table(__lowercase )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(__lowercase )}: {e}""" )
raise
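# Usage sketch (assumption: this builder is wired up as the packaged "parquet" module of
# the `datasets` library, so end users go through `load_dataset` rather than
# instantiating it directly):
#
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})
#   # the config fields defined above can be forwarded as keyword arguments, e.g.:
#   ds = load_dataset("parquet", data_files={"train": "data/train.parquet"},
#                     columns=["text", "label"])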
| 366 |
'''simple docstring'''
import qiskit
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
_a : Any = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
_a : List[Any] = qiskit.QuantumCircuit(__a , __a )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
_a : Tuple = qiskit.execute(__a , __a , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(__a )
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
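# Expected behaviour (illustrative): the circuit applies no gates before measuring, so
# the qubit stays in |0> and all shots collapse to '0'. Running the file should print
# something like: Total count for various states are: {'0': 1000}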
| 5 | 0 |
'''simple docstring'''
def UpperCAmelCase_ (__a : int ) -> int:
"""simple docstring"""
_a : Any = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def UpperCAmelCase_ (__a : int ) -> int:
"""simple docstring"""
_a : Any = 0
while number > 0:
_a : Optional[Any] = number % 1_0
sum_of_digits += last_digit
_a : Dict = number // 1_0 # Removing the last_digit from the given number
return sum_of_digits
def UpperCAmelCase_ (__a : int = 1_0_0 ) -> int:
"""simple docstring"""
_a : Optional[Any] = factorial(snake_case_ )
_a : Optional[Any] = split_and_add(snake_case_ )
return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
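# Worked examples (illustrative):
#   solution(10)  -> 10! = 3628800, digit sum 3+6+2+8+8+0+0 = 27
#   solution(100) -> 648 (the classic Project Euler problem 20 answer)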
| 367 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
def __lowercase ( self : str ):
'''simple docstring'''
_a : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_a : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_a : List[str] = 'xvjiarui/stable-diffusion-2-inpainting'
_a, _a : str = FlaxStableDiffusionInpaintPipeline.from_pretrained(_a ,safety_checker=_a )
_a : str = 'Face of a yellow cat, high resolution, sitting on a park bench'
_a : int = jax.random.PRNGKey(0 )
_a : Tuple = 50
_a : Any = jax.device_count()
_a : Dict = num_samples * [prompt]
_a : Optional[Any] = num_samples * [init_image]
_a : str = num_samples * [mask_image]
_a, _a, _a : Optional[Any] = pipeline.prepare_inputs(_a ,_a ,_a )
# shard inputs and rng
_a : Optional[Any] = replicate(_a )
_a : str = jax.random.split(_a ,jax.device_count() )
_a : Dict = shard(_a )
_a : int = shard(_a )
_a : int = shard(_a )
_a : Union[str, Any] = pipeline(
_a ,_a ,_a ,_a ,_a ,_a ,jit=_a )
_a : Union[str, Any] = output.images.reshape(_a ,512 ,512 ,3 )
_a : Union[str, Any] = images[0, 253:256, 253:256, -1]
_a : str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_a : Union[str, Any] = jnp.array(
[0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 5 | 0 |
'''simple docstring'''
import string
def UpperCAmelCase_ (__a : int ):
_a : List[Any] = ''
for i in sequence:
_a : Dict = ord(lowerCAmelCase_ )
if 6_5 <= extract <= 9_0:
output += chr(1_5_5 - extract )
elif 9_7 <= extract <= 1_2_2:
output += chr(2_1_9 - extract )
else:
output += i
return output
def UpperCAmelCase_ (__a : Any ):
_a : Optional[int] = string.ascii_letters
_a : str = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(lowerCAmelCase_ )] if c in letters else c for c in sequence )
def UpperCAmelCase_ ():
from timeit import timeit
print('Running performance benchmarks...' )
_a : Optional[Any] = 'from string import printable ; from __main__ import atbash, atbash_slow'
print(f"""> atbash_slow(): {timeit('atbash_slow(printable)' , setup=lowerCAmelCase_ )} seconds""" )
print(f"""> atbash(): {timeit('atbash(printable)' , setup=lowerCAmelCase_ )} seconds""" )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
| 368 |
'''simple docstring'''
def UpperCAmelCase_ (__a : str , __a : str ):
"""simple docstring"""
_a : int = len(__a ) + 1
_a : List[str] = len(__a ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
_a : Optional[int] = [[0 for i in range(__a )] for j in range(__a )]
# since string of zero length match pattern of zero length
_a : str = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , __a ):
_a : Optional[Any] = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , __a ):
_a : Dict = dp[0][j - 2] if pattern[j - 1] == '*' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , __a ):
for j in range(1 , __a ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
_a : Tuple = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
_a : List[str] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
_a : int = dp[i - 1][j]
else:
_a : Any = 0
else:
_a : Optional[Any] = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
__lowerCAmelCase = """aab"""
__lowerCAmelCase = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
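# Additional illustrative cases for match_pattern (doctest-style):
#   match_pattern("aa", "a*")                  -> True   ('*' repeats the preceding 'a')
#   match_pattern("ab", ".*")                  -> True   ('.*' matches any sequence)
#   match_pattern("mississippi", "mis*is*p*.") -> False
# The DP table costs O(len(input_string) * len(pattern)) time and space.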
| 5 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = torch.device("""cpu""")
def UpperCAmelCase_ ():
"""simple docstring"""
_a : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_a : Optional[int] = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw )
return im
def UpperCAmelCase_ (__a : Union[str, Any] ):
"""simple docstring"""
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def UpperCAmelCase_ (__a : int , __a : List[str] , __a : Optional[Any] ):
"""simple docstring"""
_a : Tuple = dct.pop(__SCREAMING_SNAKE_CASE )
_a : Dict = val
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Optional[int] = []
for k in state_dict.keys():
_a : Dict = k
if ".pwconv" in k:
_a : Dict = k_new.replace('.pwconv' , '.point_wise_conv' )
if ".dwconv" in k:
_a : int = k_new.replace('.dwconv' , '.depth_wise_conv' )
if ".Proj." in k:
_a : Optional[Any] = k_new.replace('.Proj.' , '.proj.' )
if "patch_embed" in k_new:
_a : Union[str, Any] = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
if "network" in k_new:
_a : List[str] = k_new.split('.' )
if ls[2].isdigit():
_a : Optional[Any] = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
else:
_a : List[str] = k_new.replace('network' , 'swiftformer.encoder.network' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def UpperCAmelCase_ (__a : Any , __a : str , __a : str ):
"""simple docstring"""
_a : int = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
_a : str = 1_0_0_0
_a : Dict = "huggingface/label-files"
_a : Tuple = "imagenet-1k-id2label.json"
_a : Tuple = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
_a : Optional[int] = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
_a : str = idalabel
_a : str = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
_a : List[Any] = [3, 3, 6, 4]
_a : Tuple = [4_8, 5_6, 1_1_2, 2_2_0]
elif swiftformer_name == "swiftformer_s":
_a : int = [3, 3, 9, 6]
_a : List[Any] = [4_8, 6_4, 1_6_8, 2_2_4]
elif swiftformer_name == "swiftformer_l1":
_a : Optional[int] = [4, 3, 1_0, 5]
_a : str = [4_8, 9_6, 1_9_2, 3_8_4]
elif swiftformer_name == "swiftformer_l3":
_a : Tuple = [4, 4, 1_2, 6]
_a : int = [6_4, 1_2_8, 3_2_0, 5_1_2]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
_a : int = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' , check_hash=__SCREAMING_SNAKE_CASE )
else:
_a : Optional[Any] = torch.load(__SCREAMING_SNAKE_CASE , map_location='cpu' )
_a : str = checkpoint
_a : int = create_rename_keys(__SCREAMING_SNAKE_CASE )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# load HuggingFace model
_a : Dict = SwiftFormerForImageClassification(__SCREAMING_SNAKE_CASE ).eval()
hf_model.load_state_dict(__SCREAMING_SNAKE_CASE )
# prepare test inputs
_a : Dict = prepare_img()
_a : Dict = ViTImageProcessor.from_pretrained('preprocessor_config' )
_a : Any = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' )
# compare outputs from both models
_a : Dict = get_expected_output(__SCREAMING_SNAKE_CASE )
_a : str = hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 1_0_0_0] )
assert torch.allclose(hf_logits[0, 0:5] , __SCREAMING_SNAKE_CASE , atol=1e-3 )
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
__lowerCAmelCase = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
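# Example invocation (hypothetical file name and checkpoint URL; arguments taken from
# the parser above):
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt https://example.com/swiftformer_xs.pth
# `--original_ckpt` may be a local path or an https URL; the script branches on the
# "https" prefix when loading the checkpoint.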
| 369 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = BlenderbotSmallTokenizer
__UpperCAmelCase : Tuple = False
def __lowercase ( self : List[Any] ):
'''simple docstring'''
super().setUp()
_a : List[str] = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
_a : Tuple = dict(zip(_a ,range(len(_a ) ) ) )
_a : List[Any] = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
_a : List[Any] = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
_a : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
_a : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(_a ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(_a ) )
def __lowercase ( self : List[Any] ,**_a : int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**_a )
def __lowercase ( self : Tuple ,_a : int ):
'''simple docstring'''
_a : Optional[Any] = 'adapt act apte'
_a : Dict = 'adapt act apte'
return input_text, output_text
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[int] = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
_a : Union[str, Any] = 'adapt act apte'
_a : Dict = ['adapt', 'act', 'ap@@', 'te']
_a : Tuple = tokenizer.tokenize(_a )
self.assertListEqual(_a ,_a )
_a : List[str] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_a : Dict = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) ,_a )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : str = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [1384]
_a : Union[str, Any] = 'I am a small frog.'
_a : int = tok([src_text] ,padding=_a ,truncation=_a )['input_ids']
_a : str = tok.batch_decode(_a ,skip_special_tokens=_a ,clean_up_tokenization_spaces=_a )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
_a : Union[str, Any] = 'I am a small frog .'
_a : Optional[Any] = '.'
_a : Optional[Any] = tok(_a )['input_ids']
_a : Union[str, Any] = tok(_a )['input_ids']
assert encoded[-1] == encoded_dot[0]
| 5 | 0 |
'''simple docstring'''
from __future__ import annotations
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : List[str] ,_a : int ):
'''simple docstring'''
_a : Optional[int] = data
_a : int = None
_a : str = None
def UpperCAmelCase_ (__a : Node | None ): # In Order traversal of the tree
"""simple docstring"""
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def UpperCAmelCase_ (__a : Node | None ):
"""simple docstring"""
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def UpperCAmelCase_ (__a : Node ):
"""simple docstring"""
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def UpperCAmelCase_ (): # Main function for testing.
"""simple docstring"""
_a : Union[str, Any] = Node(1 )
_a : List[Any] = Node(2 )
_a : int = Node(3 )
_a : Union[str, Any] = Node(4 )
_a : Union[str, Any] = Node(5 )
_a : int = Node(6 )
_a : str = Node(7 )
_a : Union[str, Any] = Node(8 )
_a : Dict = Node(9 )
print(is_full_binary_tree(__lowerCAmelCase ) )
print(depth_of_tree(__lowerCAmelCase ) )
print('Tree is: ' )
display(__lowerCAmelCase )
if __name__ == "__main__":
main()
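# Quick sanity examples (illustrative):
#   a lone Node(1) is a full binary tree (no children) and has depth 1;
#   a node with exactly one child makes is_full_binary_tree return False, because the
#   `tree.left and tree.right` branch is skipped and `not tree.left and not tree.right`
#   evaluates to False.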
| 370 |
'''simple docstring'''
# fmt: off
__lowerCAmelCase = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
__lowerCAmelCase = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : List[str] = 'Morse code here!'
print(__a )
_a : Tuple = encrypt(__a )
print(__a )
_a : str = decrypt(__a )
print(__a )
if __name__ == "__main__":
main()
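# Doctest-style example (illustrative):
#   encrypt("SOS")          -> '... --- ...'
#   decrypt('... --- ...')  -> 'SOS'
# Note that encrypt upper-cases its input, so decrypt(encrypt(msg)) returns the
# upper-cased message rather than the original casing.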
| 5 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {"vocab_file": "vocab.json"}
__lowerCAmelCase = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
__lowerCAmelCase = {"mgp-str": 2_7}
class UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase : Dict = VOCAB_FILES_NAMES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Dict ,_a : Optional[int] ,_a : List[Any]="[GO]" ,_a : Tuple="[GO]" ,_a : Optional[Any]="[s]" ,_a : List[Any]="[GO]" ,**_a : int ):
'''simple docstring'''
super().__init__(
unk_token=A_ ,bos_token=A_ ,eos_token=A_ ,pad_token=A_ ,**A_ ,)
with open(A_ ,encoding='utf-8' ) as vocab_handle:
_a : int = json.load(A_ )
_a : Union[str, Any] = {v: k for k, v in self.vocab.items()}
@property
def __lowercase ( self : Dict ):
'''simple docstring'''
return len(self.vocab )
def __lowercase ( self : Any ):
'''simple docstring'''
return dict(self.vocab ,**self.added_tokens_encoder )
def __lowercase ( self : Optional[Any] ,_a : Dict ):
'''simple docstring'''
_a : Tuple = []
for s in text:
char_tokens.extend(A_ )
return char_tokens
def __lowercase ( self : str ,_a : int ):
'''simple docstring'''
return self.vocab.get(A_ ,self.vocab.get(self.unk_token ) )
def __lowercase ( self : Optional[int] ,_a : Dict ):
'''simple docstring'''
return self.decoder.get(A_ )
def __lowercase ( self : int ,_a : List[Any] ,_a : Optional[int] = None ):
'''simple docstring'''
if not os.path.isdir(A_ ):
logger.error('Vocabulary path ({}) should be a directory'.format(A_ ) )
return
_a : Union[str, Any] = os.path.join(
A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
with open(A_ ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.vocab ,indent=2 ,sort_keys=A_ ,ensure_ascii=A_ ) + '\n' )
return (vocab_file,)
| 371 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase ( cls : Optional[Any] ):
'''simple docstring'''
_a : List[Any] = TOKEN
HfFolder.save_token(_a )
@classmethod
def __lowercase ( cls : List[Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-config' )
except HTTPError:
pass
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Any = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('test-config' ,use_auth_token=self._token )
_a : Optional[Any] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a ,repo_id='test-config' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Dict = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Tuple = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' ,use_auth_token=self._token )
_a : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a ,repo_id='valid_org/test-config-org' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
_a : Optional[Any] = CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map ,{'AutoConfig': 'custom_configuration.CustomConfig'} )
_a : int = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" ,trust_remote_code=_a )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ ,'CustomConfig' )
self.assertEqual(new_config.attribute ,42 )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Optional[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_a : int = c.n_embd + 1 # int
_a : str = c.resid_pdrop + 1.0 # float
_a : Dict = not c.scale_attn_weights # bool
_a : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(_a ,c.n_embd ,'mismatch for key: n_embd' )
self.assertEqual(_a ,c.resid_pdrop ,'mismatch for key: resid_pdrop' )
self.assertEqual(_a ,c.scale_attn_weights ,'mismatch for key: scale_attn_weights' )
self.assertEqual(_a ,c.summary_type ,'mismatch for key: summary_type' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : int = PretrainedConfig()
_a : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
_a ,['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
_a : Dict = [key for key, value in config_common_kwargs.items() if value == getattr(_a ,_a )]
if len(_a ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F""" {', '.join(_a )}.""" )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaises(_a ):
# config is in subfolder, the following should not work without specifying the subfolder
_a : List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_a : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ,subfolder='bert' )
self.assertIsNotNone(_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : List[Any] = mock.Mock()
_a : Any = 500
_a : Any = {}
_a : Any = HTTPError
_a : List[Any] = {}
# Download this model to make sure it's in the cache.
_a : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' ,return_value=_a ) as mock_head:
_a : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This checks that we did call the fake head request
mock_head.assert_called()
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = AutoConfig.from_pretrained('bert-base-cased' )
_a : List[str] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_a )
_a : str = 2
json.dump(configuration.to_dict() ,open(os.path.join(_a ,'config.4.0.0.json' ) ,'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_a : int = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size ,2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_a : Tuple = ['config.42.0.0.json']
_a : int = 768
configuration.save_pretrained(_a )
shutil.move(os.path.join(_a ,'config.4.0.0.json' ) ,os.path.join(_a ,'config.42.0.0.json' ) )
_a : int = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size ,768 )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Tuple = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
_a : Optional[int] = 'v4.0.0'
_a, _a : Tuple = new_transformers.models.auto.AutoConfig.from_pretrained(
_a ,return_unused_kwargs=_a )
self.assertEqual(new_configuration.hidden_size ,2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_a ,{} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_a : str = 'v3.0.0'
_a : Optional[Any] = old_transformers.models.auto.AutoConfig.from_pretrained(_a )
self.assertEqual(old_configuration.hidden_size ,768 )
| 5 | 0 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__lowerCAmelCase = logging.getLogger(__name__)
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : int ,_a : List[Any]=-1 ):
'''simple docstring'''
_a : Optional[Any] = label_idx
def __lowercase ( self : List[Any] ,_a : str ,_a : Union[Split, str] ):
'''simple docstring'''
if isinstance(_a ,_a ):
_a : Optional[int] = mode.value
_a : Optional[int] = os.path.join(_a ,F"""{mode}.txt""" )
_a : List[str] = 1
_a : int = []
with open(_a ,encoding='utf-8' ) as f:
_a : Dict = []
_a : Optional[int] = []
for line in f:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" ,words=_a ,labels=_a ) )
guid_index += 1
_a : Optional[int] = []
_a : Optional[int] = []
else:
_a : Optional[int] = line.split(' ' )
words.append(splits[0] )
if len(_a ) > 1:
labels.append(splits[self.label_idx].replace('\n' ,'' ) )
else:
# Examples could have no label for mode = "test"
labels.append('O' )
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" ,words=_a ,labels=_a ) )
return examples
def __lowercase ( self : Optional[Any] ,_a : TextIO ,_a : TextIO ,_a : List ):
'''simple docstring'''
_a : Any = 0
for line in test_input_reader:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
writer.write(_a )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_a : List[Any] = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
writer.write(_a )
else:
logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' ,line.split()[0] )
def __lowercase ( self : Any ,_a : str ):
'''simple docstring'''
if path:
with open(_a ,'r' ) as f:
_a : str = f.read().splitlines()
if "O" not in labels:
_a : Optional[int] = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : str ):
'''simple docstring'''
super().__init__(label_idx=-2 )
def __lowercase ( self : str ,_a : str ):
'''simple docstring'''
if path:
with open(_a ,'r' ) as f:
_a : Optional[int] = f.read().splitlines()
if "O" not in labels:
_a : Tuple = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __lowercase ( self : Any ,_a : List[Any] ,_a : Union[Split, str] ):
'''simple docstring'''
if isinstance(_a ,_a ):
_a : Union[str, Any] = mode.value
_a : List[Any] = os.path.join(_a ,F"""{mode}.txt""" )
_a : int = 1
_a : Optional[Any] = []
with open(_a ,encoding='utf-8' ) as f:
for sentence in parse_incr(_a ):
_a : Any = []
_a : Optional[Any] = []
for token in sentence:
words.append(token['form'] )
labels.append(token['upos'] )
assert len(_a ) == len(_a )
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" ,words=_a ,labels=_a ) )
guid_index += 1
return examples
def __lowercase ( self : Tuple ,_a : TextIO ,_a : TextIO ,_a : List ):
'''simple docstring'''
_a : Any = 0
for sentence in parse_incr(_a ):
_a : List[str] = preds_list[example_id]
_a : Optional[Any] = ''
for token in sentence:
out += F"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """
out += "\n"
writer.write(_a )
example_id += 1
def __lowercase ( self : Optional[Any] ,_a : str ):
'''simple docstring'''
if path:
with open(_a ,'r' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
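# Usage sketch (hypothetical; upstream, in the transformers token-classification
# example's utils_ner/tasks modules, these classes are named NER, Chunk and POS and the
# methods above are read_examples_from_file and get_labels). The first task reads
# CoNLL-style files where each line holds a token followed by its tag and blank lines
# separate sentences, e.g.:
#
#   EU B-ORG
#   rejects O
#
#   task = UpperCAmelCase__()                         # the label_idx=-1 (NER) task above
#   examples = task.read_examples_from_file("data/", Split.train)   # reads data/train.txt
#   labels = task.get_labels("data/labels.txt")       # or pass None for the defaults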
| 350 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
__lowerCAmelCase = {
"""169M""": 1_2,
"""430M""": 2_4,
"""1B5""": 2_4,
"""3B""": 3_2,
"""7B""": 3_2,
"""14B""": 4_0,
}
__lowerCAmelCase = {
"""169M""": 7_6_8,
"""430M""": 1_0_2_4,
"""1B5""": 2_0_4_8,
"""3B""": 2_5_6_0,
"""7B""": 4_0_9_6,
"""14B""": 5_1_2_0,
}
def UpperCAmelCase_ (__a : Dict ):
"""simple docstring"""
_a : List[Any] = list(state_dict.keys() )
for name in state_dict_keys:
_a : List[Any] = state_dict.pop(__a )
# emb -> embedding
if name.startswith('emb.' ):
_a : List[str] = name.replace('emb.' , 'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
_a : Dict = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
_a : int = re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , __a )
# ffn -> feed_forward
_a : str = re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , __a )
# time_mix_k -> time_mix_key and reshape
if name.endswith('.time_mix_k' ):
_a : Any = name.replace('.time_mix_k' , '.time_mix_key' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('.time_mix_v' ):
_a : int = name.replace('.time_mix_v' , '.time_mix_value' )
# time_mix_r -> time_mix_receptance and reshape
if name.endswith('.time_mix_r' ):
_a : Tuple = name.replace('.time_mix_r' , '.time_mix_receptance' )
if name != "head.weight":
_a : Tuple = 'rwkv.' + name
_a : List[Any] = weight
return state_dict
def UpperCAmelCase_ (__a : Tuple , __a : Union[str, Any] , __a : List[str] , __a : str=None , __a : List[str]=None , __a : int=False , __a : int=None ):
"""simple docstring"""
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
_a : List[Any] = 5_0_2_7_7
_a : Optional[Any] = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
_a : Optional[Any] = PreTrainedTokenizerFast(tokenizer_file=__a )
_a : List[Any] = len(__a )
tokenizer.save_pretrained(__a )
# 2. Build the config
_a : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_a : str = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
_a : str = RwkvConfig(
vocab_size=__a , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__a )
# 3. Download model file then convert state_dict
_a : Tuple = hf_hub_download(__a , __a )
_a : Optional[int] = torch.load(__a , map_location='cpu' )
_a : Dict = convert_state_dict(__a )
# 4. Split in shards and save
_a, _a : List[Any] = shard_checkpoint(__a )
for shard_file, shard in shards.items():
torch.save(__a , os.path.join(__a , __a ) )
if index is not None:
_a : Dict = os.path.join(__a , __a )
# Save the index as well
with open(__a , 'w' , encoding='utf-8' ) as f:
_a : List[Any] = json.dumps(__a , indent=2 , sort_keys=__a ) + '\n'
f.write(__a )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
'Cleaning up shards. This may error with an OOM error, if this is the case don\'t worry, you still have converted the model.' )
_a : List[Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_a : Optional[Any] = torch.load(os.path.join(__a , __a ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__a , __a ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
_a : List[str] = AutoModelForCausalLM.from_pretrained(__a )
model.push_to_hub(__a , max_shard_size='2GB' )
tokenizer.push_to_hub(__a )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
__lowerCAmelCase = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
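# Example invocation (hypothetical file name; the repo/checkpoint values are
# illustrative and the arguments are taken from the parser above):
#   python convert_rwkv_checkpoint_to_hf.py \
#       --repo_id BlinkDL/rwkv-4-pile-169m \
#       --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth \
#       --output_dir ./rwkv-169m-hf \
#       --size 169M --push_to_hub --model_name my-user/rwkv-4-169m
# `--size` is optional when the checkpoint file name already contains one of the keys
# of the layer/hidden-size mappings defined at the top of the file.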
| 5 | 0 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
__lowerCAmelCase = """sshleifer/bart-tiny-random"""
__lowerCAmelCase = """patrickvonplaten/t5-tiny-random"""
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return AutoConfig.from_pretrained(_a )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Any = create_student_by_copying_alternating_layers(_a ,tempfile.mkdtemp() ,e=1 ,d=1 )
self.assertEqual(student.config.num_hidden_layers ,1 )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : int = create_student_by_copying_alternating_layers(_a ,tempfile.mkdtemp() ,e=1 ,d=_a )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Optional[Any] = create_student_by_copying_alternating_layers(_a ,tempfile.mkdtemp() ,e=1 ,d=_a )
self.assertEqual(student.config.encoder_layers ,1 )
self.assertEqual(student.config.decoder_layers ,self.teacher_config.encoder_layers )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Union[str, Any] = create_student_by_copying_alternating_layers(_a ,tempfile.mkdtemp() ,e=1 ,d=1 )
self.assertEqual(student.config.encoder_layers ,1 )
self.assertEqual(student.config.decoder_layers ,1 )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
with self.assertRaises(_a ):
create_student_by_copying_alternating_layers(_a ,tempfile.mkdtemp() ,e=_a ,d=_a )
| 351 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
__lowerCAmelCase = datasets.logging.get_logger(__name__)
__lowerCAmelCase = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
__lowerCAmelCase = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
__lowerCAmelCase = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://unbabel.github.io/COMET/html/index.html' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'sources': datasets.Value('string' ,id='sequence' ),
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/Unbabel/COMET'] ,reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6',
] ,)
    def _download_and_prepare( self ,dl_manager ):
        '''simple docstring'''
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
    def _compute( self ,sources ,predictions ,references ,gpus=None ,progress_bar=False ):
        '''simple docstring'''
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'src': sources, 'mt': predictions, 'ref': references}
        data = [dict(zip(data ,t ) ) for t in zip(*data.values() )]
        mean_score, scores = self.scorer.predict(data ,gpus=gpus ,progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
| 5 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCAmelCase = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 352 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester :
"""simple docstring"""
def __init__( self : int ,_a : List[str] ,_a : Union[str, Any]=13 ,_a : Optional[int]=32 ,_a : int=3 ,_a : List[str]=4 ,_a : Optional[int]=[10, 20, 30, 40] ,_a : Tuple=[2, 2, 3, 2] ,_a : Dict=True ,_a : List[str]=True ,_a : Optional[Any]=37 ,_a : Union[str, Any]="gelu" ,_a : Any=10 ,_a : str=0.02 ,_a : str=["stage2", "stage3", "stage4"] ,_a : List[str]=[2, 3, 4] ,_a : List[Any]=None ,):
'''simple docstring'''
_a : Dict = parent
_a : List[str] = batch_size
_a : str = image_size
_a : Tuple = num_channels
_a : List[str] = num_stages
_a : Optional[Any] = hidden_sizes
_a : str = depths
_a : int = is_training
_a : Optional[int] = use_labels
_a : Tuple = intermediate_size
_a : List[str] = hidden_act
_a : Union[str, Any] = num_labels
_a : Union[str, Any] = initializer_range
_a : Union[str, Any] = out_features
_a : List[str] = out_indices
_a : Union[str, Any] = scope
def __lowercase ( self : str ):
'''simple docstring'''
_a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Dict = None
if self.use_labels:
_a : Union[str, Any] = ids_tensor([self.batch_size] ,self.num_labels )
_a : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Tuple ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=_a ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,)
def __lowercase ( self : Union[str, Any] ,_a : int ,_a : Optional[Any] ,_a : int ):
'''simple docstring'''
_a : Any = ConvNextVaModel(config=_a )
model.to(_a )
model.eval()
_a : Union[str, Any] = model(_a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowercase ( self : Any ,_a : Any ,_a : str ,_a : Optional[Any] ):
'''simple docstring'''
_a : Tuple = ConvNextVaForImageClassification(_a )
model.to(_a )
model.eval()
_a : Optional[Any] = model(_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowercase ( self : Dict ,_a : Any ,_a : List[str] ,_a : List[Any] ):
'''simple docstring'''
_a : int = ConvNextVaBackbone(config=_a )
model.to(_a )
model.eval()
_a : Any = model(_a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_a : str = None
_a : Optional[Any] = ConvNextVaBackbone(config=_a )
model.to(_a )
model.eval()
_a : Dict = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
    def __lowercase ( self : Dict ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
    def __lowercase ( self : Optional[int] ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values, 'labels': labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : Any = ConvNextVaModelTester(self )
_a : Dict = ConfigTester(self ,config_class=_a ,has_text_modality=_a ,hidden_size=37 )
def __lowercase ( self : Any ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self : List[str] ):
'''simple docstring'''
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
def __lowercase ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
def __lowercase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
def __lowercase ( self : Tuple ):
'''simple docstring'''
pass
def __lowercase ( self : str ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_a : Dict = self.model_tester.prepare_config_and_inputs_with_labels()
_a : List[str] = True
if model_class.__name__ in [
*get_values(_a ),
*get_values(_a ),
]:
continue
_a : Union[str, Any] = model_class(_a )
model.to(_a )
model.train()
_a : Any = self._prepare_for_class(_a ,_a ,return_labels=_a )
_a : Optional[int] = model(**_a ).loss
loss.backward()
def __lowercase ( self : List[str] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_a : Tuple = False
_a : int = True
if (
model_class.__name__
in [*get_values(_a ), *get_values(_a )]
or not model_class.supports_gradient_checkpointing
):
continue
_a : List[Any] = model_class(_a )
model.to(_a )
model.gradient_checkpointing_enable()
model.train()
_a : Any = self._prepare_for_class(_a ,_a ,return_labels=_a )
_a : str = model(**_a ).loss
loss.backward()
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Optional[int] = model_class(_a )
_a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Any = [*signature.parameters.keys()]
_a : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self : Any ):
'''simple docstring'''
def check_hidden_states_output(_a : Any ,_a : List[str] ,_a : List[Any] ):
_a : Any = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : int = model(**self._prepare_for_class(_a ,_a ) )
_a : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_a : Tuple = self.model_tester.num_stages
self.assertEqual(len(_a ) ,expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = True
check_hidden_states_output(_a ,_a ,_a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : Any = True
check_hidden_states_output(_a ,_a ,_a )
def __lowercase ( self : str ):
'''simple docstring'''
_a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self : int ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Optional[int] = ConvNextVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Dict ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
@slow
    def __lowercase ( self : Optional[int] ):
        '''simple docstring'''
        model = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(torch_device )
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image ,return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1E-4 ) )
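# A condensed sketch of the integration-test pattern above (illustrative only;
# requires network access to download the checkpoint):
# processor = AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224')
# model = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224')
# inputs = processor(images=prepare_img(), return_tensors='pt')
# with torch.no_grad():
#     logits = model(**inputs).logits  # shape (1, 1000), ImageNet-1k classes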
| 353 |
'''simple docstring'''
import sys
def matrix_chain_order(array ):
    """simple docstring"""
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optimal_solution(optimal_solution , i , j ):
    """simple docstring"""
    if i == j:
        print('A' + str(i ) , end=' ' )
    else:
        print('(' , end=' ' )
        print_optimal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optimal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(')' , end=' ' )
def main():
    """simple docstring"""
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array )
    print('No. of Operation required: ' + str(matrix[1][n - 1] ) )
    print_optimal_solution(optimal_solution , 1 , n - 1 )
if __name__ == "__main__":
    main()
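# Worked check (a sketch, not part of the original script): for dimensions
# [30, 35, 15, 5, 10, 20, 25] the minimum number of scalar multiplications
# is 15125, with parenthesization ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ).
# matrix, sol = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
# assert matrix[1][6] == 15125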
| 5 | 0 |
'''simple docstring'''
__all__ = [
"""VerificationMode""",
"""Version""",
"""disable_progress_bar""",
"""enable_progress_bar""",
"""is_progress_bar_enabled""",
"""experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 354 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock_timeout(tmpdir ):
    """simple docstring"""
    lock_a = FileLock(str(tmpdir / 'foo.lock' ) )
    lock_b = FileLock(str(tmpdir / 'foo.lock' ) )
    timeout = 0.01
    with lock_a.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lock_b.acquire(timeout )
        assert time.time() - _start > timeout
def test_filelock_long_filename(tmpdir ):
    """simple docstring"""
    filename = 'a' * 1000 + '.lock'
    lock_a = FileLock(str(tmpdir / filename ) )
    assert lock_a._lock_file.endswith('.lock' )
    assert not lock_a._lock_file.endswith(filename )
    assert len(os.path.basename(lock_a._lock_file ) ) <= 255
    lock_b = FileLock(tmpdir / filename )
    with lock_a.acquire():
        with pytest.raises(Timeout ):
            lock_b.acquire(0 )
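# A minimal sketch of the pattern exercised above: FileLock serializes access
# to a shared resource across processes via a lock file (path is illustrative).
def _example_locked_append(path , text ):
    lock = FileLock(path + '.lock' )
    with lock:  # equivalent to lock.acquire() ... lock.release()
        with open(path , 'a' ) as f:
            f.write(text )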
| 5 | 0 |
'''simple docstring'''
from typing import Any
def viterbi(observations_space: list , states_space: list , initial_probabilities: dict , transition_probabilities: dict , emission_probabilities: dict , ) -> list:
    """simple docstring"""
    _validation(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ''
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space ) - 1]
    # argmax for given final observation
    arg_max = ''
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1 , -1 , -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(observations_space: Any , states_space: Any , initial_probabilities: Any , transition_probabilities: Any , emission_probabilities: Any , ) -> None:
    """simple docstring"""
    _validate_not_empty(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    _validate_lists(observations_space , states_space )
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities )
def _validate_not_empty(observations_space: Any , states_space: Any , initial_probabilities: Any , transition_probabilities: Any , emission_probabilities: Any , ) -> None:
    """simple docstring"""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError('There\'s an empty parameter' )
def _validate_lists(observations_space: Any , states_space: Any ) -> None:
    """simple docstring"""
    _validate_list(observations_space , 'observations_space' )
    _validate_list(states_space , 'states_space' )
def _validate_list(_object: Any , var_name: str ) -> None:
    """simple docstring"""
    if not isinstance(_object , list ):
        msg = f"""{var_name} must be a list"""
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x , str ):
                msg = f"""{var_name} must be a list of strings"""
                raise ValueError(msg )
def _validate_dicts(initial_probabilities: Any , transition_probabilities: Any , emission_probabilities: Any , ) -> None:
    """simple docstring"""
    _validate_dict(initial_probabilities , 'initial_probabilities' , float )
    _validate_nested_dict(transition_probabilities , 'transition_probabilities' )
    _validate_nested_dict(emission_probabilities , 'emission_probabilities' )
def _validate_nested_dict(_object: Any , var_name: str ) -> None:
    """simple docstring"""
    _validate_dict(_object , var_name , dict )
    for x in _object.values():
        _validate_dict(x , var_name , float , True )
def _validate_dict(_object: Any , var_name: str , value_type: type , nested: bool = False ) -> None:
    """simple docstring"""
    if not isinstance(_object , dict ):
        msg = f"""{var_name} must be a dict"""
        raise ValueError(msg )
    if not all(isinstance(x , str ) for x in _object ):
        msg = f"""{var_name} all keys must be strings"""
        raise ValueError(msg )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
        nested_text = 'nested dictionary ' if nested else ''
        msg = f"""{var_name} {nested_text}all values must be {value_type.__name__}"""
        raise ValueError(msg )
if __name__ == "__main__":
    from doctest import testmod
    testmod()
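# A small usage sketch (the classic "Healthy/Fever" toy HMM; values are
# illustrative and not part of the original module):
# observations = ['normal', 'cold', 'dizzy']
# states = ['Healthy', 'Fever']
# start_p = {'Healthy': 0.6, 'Fever': 0.4}
# trans_p = {'Healthy': {'Healthy': 0.7, 'Fever': 0.3}, 'Fever': {'Healthy': 0.4, 'Fever': 0.6}}
# emit_p = {'Healthy': {'normal': 0.5, 'cold': 0.4, 'dizzy': 0.1}, 'Fever': {'normal': 0.1, 'cold': 0.3, 'dizzy': 0.6}}
# viterbi(observations, states, start_p, trans_p, emit_p)  # -> ['Healthy', 'Healthy', 'Fever']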
| 355 |
'''simple docstring'''
def solution(min_total: int = 10**12 ) -> int:
    """simple docstring"""
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
if __name__ == "__main__":
    print(f'''{solution() = }''')
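# Sanity check (a sketch): the first arrangement with more than 21 discs in
# total is 85 blue discs out of 120, so solution(21) should return 85.
# assert solution(21) == 85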
| 5 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
class MaskFormerSwinConfig( BackboneConfigMixin , PretrainedConfig ):
    """simple docstring"""
    model_type = 'maskformer-swin'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self ,image_size=224 ,patch_size=4 ,num_channels=3 ,embed_dim=96 ,depths=[2, 2, 6, 2] ,num_heads=[3, 6, 12, 24] ,window_size=7 ,mlp_ratio=4.0 ,qkv_bias=True ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,drop_path_rate=0.1 ,hidden_act="gelu" ,use_absolute_embeddings=False ,initializer_range=0.02 ,layer_norm_eps=1E-5 ,out_features=None ,out_indices=None ,**kwargs ,):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ['stem'] + [F"""stage{idx}""" for idx in range(1 ,len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features ,out_indices=out_indices ,stage_names=self.stage_names )
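# A minimal usage sketch (illustrative, not part of the original module):
# config = MaskFormerSwinConfig()          # all defaults
# assert config.hidden_size == 96 * 2**3   # 768: channel dim after the last stage
# assert config.stage_names == ['stem', 'stage1', 'stage2', 'stage3', 'stage4']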
| 356 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}
PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer
    def __init__( self ,vocab_file=None ,tokenizer_file=None ,do_lower_case=True ,unk_token="[UNK]" ,sep_token="[SEP]" ,pad_token="[PAD]" ,cls_token="[CLS]" ,mask_token="[MASK]" ,tokenize_chinese_chars=True ,strip_accents=None ,**kwargs ,):
        '''simple docstring'''
        super().__init__(
            vocab_file ,tokenizer_file=tokenizer_file ,do_lower_case=do_lower_case ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,tokenize_chinese_chars=tokenize_chinese_chars ,strip_accents=strip_accents ,**kwargs ,)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' ,do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' ,strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' ,tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers ,normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self ,token_ids_a ,token_ids_b=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self ,token_ids_a: List[int] ,token_ids_b: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self ,save_directory: str ,filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
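# A minimal usage sketch (illustrative; requires network access to the Hub):
# tokenizer = MobileBertTokenizerFast.from_pretrained('google/mobilebert-uncased')
# tokenizer('Hello world')['input_ids']  # -> [cls_id, ..., sep_id]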
| 5 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester :
"""simple docstring"""
def __init__( self : Dict ,_a : List[Any] ,_a : List[str]=2 ,_a : Optional[Any]=True ,_a : Union[str, Any]=False ,_a : List[Any]=10 ,_a : Optional[int]=3 ,_a : List[str]=32 * 8 ,_a : Optional[Any]=32 * 8 ,_a : int=4 ,_a : Union[str, Any]=64 ,):
'''simple docstring'''
_a : Dict = parent
_a : int = batch_size
_a : Optional[Any] = is_training
_a : Any = use_auxiliary_loss
_a : Any = num_queries
_a : Tuple = num_channels
_a : str = min_size
_a : Union[str, Any] = max_size
_a : Union[str, Any] = num_labels
_a : Union[str, Any] = hidden_dim
_a : List[Any] = hidden_dim
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_a )
_a : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=_a )
_a : str = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=_a ) > 0.5
).float()
_a : str = (torch.rand((self.batch_size, self.num_labels) ,device=_a ) > 0.5).long()
_a : Union[str, Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : List[Any] = MaskaFormerConfig(
hidden_size=self.hidden_dim ,)
_a : Tuple = self.num_queries
_a : str = self.num_labels
_a : List[Any] = [1, 1, 1, 1]
_a : List[Any] = self.num_channels
_a : int = 64
_a : str = 128
_a : List[str] = self.hidden_dim
_a : str = self.hidden_dim
_a : Tuple = self.hidden_dim
return config
    def __lowercase ( self : Tuple ):
        '''simple docstring'''
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict
def __lowercase ( self : Tuple ,_a : Optional[int] ,_a : List[str] ):
'''simple docstring'''
_a : Any = output.encoder_hidden_states
_a : int = output.pixel_decoder_hidden_states
_a : Union[str, Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_a ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) ,config.decoder_layers )
def __lowercase ( self : str ,_a : int ,_a : str ,_a : List[str] ,_a : Optional[Any]=False ):
'''simple docstring'''
with torch.no_grad():
_a : str = MaskaFormerModel(config=_a )
model.to(_a )
model.eval()
_a : List[Any] = model(pixel_values=_a ,pixel_mask=_a )
_a : Optional[int] = model(_a ,output_hidden_states=_a )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,)
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_a ,_a )
def __lowercase ( self : List[str] ,_a : List[str] ,_a : str ,_a : List[Any] ,_a : List[Any] ,_a : Any ):
'''simple docstring'''
_a : List[str] = MaskaFormerForUniversalSegmentation(config=_a )
model.to(_a )
model.eval()
def comm_check_on_output(_a : Optional[int] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_a : Optional[Any] = model(pixel_values=_a ,pixel_mask=_a )
_a : Union[str, Any] = model(_a )
comm_check_on_output(_a )
_a : List[str] = model(
pixel_values=_a ,pixel_mask=_a ,mask_labels=_a ,class_labels=_a )
comm_check_on_output(_a )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : List[str] = MaskaFormerModelTester(self )
_a : List[str] = ConfigTester(self ,config_class=_a ,has_text_modality=_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : str ):
'''simple docstring'''
_a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a ,**_a ,output_hidden_states=_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_a )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def __lowercase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def __lowercase ( self : Tuple ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __lowercase ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowercase ( self : Any ):
'''simple docstring'''
pass
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[Any] = model_class(_a )
_a : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : List[str] = [*signature.parameters.keys()]
_a : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
@slow
def __lowercase ( self : List[Any] ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_a : List[str] = MaskaFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Optional[int] = (self.model_tester.min_size,) * 2
_a : List[str] = {
'pixel_values': torch.randn((2, 3, *size) ,device=_a ),
'mask_labels': torch.randn((2, 10, *size) ,device=_a ),
'class_labels': torch.zeros(2 ,10 ,device=_a ).long(),
}
_a : str = self.model_tester.get_config()
_a : str = MaskaFormerForUniversalSegmentation(_a ).to(_a )
_a : int = model(**_a )
self.assertTrue(outputs.loss is not None )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a ,**_a ,output_hidden_states=_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[Any] = model_class(_a ).to(_a )
_a : Optional[int] = model(**_a ,output_attentions=_a )
self.assertTrue(outputs.attentions is not None )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_a : Tuple = self.all_model_classes[1]
_a : str = self.model_tester.prepare_config_and_inputs()
_a : Union[str, Any] = model_class(_a )
model.to(_a )
model.train()
_a : str = model(_a ,mask_labels=_a ,class_labels=_a ).loss
loss.backward()
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : List[str] = self.all_model_classes[1]
_a : Tuple = self.model_tester.prepare_config_and_inputs()
_a : Optional[Any] = True
_a : Any = True
_a : Tuple = model_class(_a ).to(_a )
model.train()
_a : List[Any] = model(_a ,mask_labels=_a ,class_labels=_a )
_a : Optional[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_a : Any = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_a : int = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_a : str = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_a )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__lowerCAmelCase = 1e-4
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_vision
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Tuple ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __lowercase ( self : List[str] ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __lowercase ( self : str ):
'''simple docstring'''
_a : List[str] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_a )
_a : str = self.default_image_processor
_a : List[str] = prepare_img()
_a : int = image_processor(_a ,return_tensors='pt' ).to(_a )
_a : List[str] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a ,(1, 3, 384, 384) )
with torch.no_grad():
_a : Any = model(**_a )
_a : List[Any] = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,_a ,atol=_a ) )
_a : Union[str, Any] = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,_a ,atol=_a ) )
_a : Optional[Any] = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,_a ,atol=_a ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : List[str] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
_a : Dict = self.default_image_processor
_a : Any = prepare_img()
_a : Union[str, Any] = image_processor(_a ,return_tensors='pt' ).to(_a )
_a : Union[str, Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a ,(1, 3, 384, 384) )
with torch.no_grad():
_a : Dict = model(**_a )
# masks_queries_logits
_a : Any = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_a : Union[str, Any] = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
_a : Dict = torch.tensor(_a ).to(_a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,_a ,atol=_a ) )
# class_queries_logits
_a : Dict = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
_a : Any = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,_a ,atol=_a ) )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
_a : Tuple = self.default_image_processor
_a : List[str] = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,)
_a : Tuple = inputs['pixel_values'].to(_a )
_a : Optional[Any] = [el.to(_a ) for el in inputs['mask_labels']]
_a : List[Any] = [el.to(_a ) for el in inputs['class_labels']]
with torch.no_grad():
_a : int = model(**_a )
self.assertTrue(outputs.loss is not None )
| 357 |
'''simple docstring'''
def palindromic_string(input_string ):
    """simple docstring"""
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ''
    output_string = ''
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string ) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string ) )]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string ) ):
        k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(new_input_string )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
    import doctest
    doctest.testmod()
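# Usage sketch for the function above (expected outputs verified by hand):
# palindromic_string('abbbaba')  # -> 'abbba'
# palindromic_string('ababa')    # -> 'ababa'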
| 5 | 0 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester :
"""simple docstring"""
def __init__( self : Tuple ,_a : Tuple ,_a : Optional[Any]=13 ,_a : List[str]=7 ,_a : str=True ,_a : List[str]=True ,_a : Optional[int]=True ,_a : Union[str, Any]=True ,_a : Union[str, Any]=True ,_a : Union[str, Any]=False ,_a : Union[str, Any]=False ,_a : List[Any]=False ,_a : str=2 ,_a : List[str]=99 ,_a : Tuple=0 ,_a : List[str]=32 ,_a : Dict=5 ,_a : Union[str, Any]=4 ,_a : Union[str, Any]=0.1 ,_a : Tuple=0.1 ,_a : Union[str, Any]=512 ,_a : Optional[int]=2 ,_a : Any=0.02 ,_a : List[str]=2 ,_a : Dict=4 ,_a : int="last" ,_a : Optional[Any]=True ,_a : str=None ,_a : str=0 ,):
'''simple docstring'''
_a : Dict = parent
_a : Tuple = batch_size
_a : List[Any] = seq_length
_a : Any = is_training
_a : List[Any] = use_input_lengths
_a : Optional[Any] = use_token_type_ids
_a : Tuple = use_labels
_a : Dict = gelu_activation
_a : int = sinusoidal_embeddings
_a : int = causal
_a : Any = asm
_a : Dict = n_langs
_a : str = vocab_size
_a : Optional[int] = n_special
_a : List[Any] = hidden_size
_a : Any = num_hidden_layers
_a : List[Any] = num_attention_heads
_a : Optional[Any] = hidden_dropout_prob
_a : List[Any] = attention_probs_dropout_prob
_a : Dict = max_position_embeddings
_a : List[str] = type_sequence_label_size
_a : List[str] = initializer_range
_a : Any = num_labels
_a : Optional[int] = num_choices
_a : List[str] = summary_type
_a : Optional[Any] = use_proj
_a : Dict = scope
_a : List[Any] = bos_token_id
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_a : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_a : int = None
if self.use_input_lengths:
_a : Union[str, Any] = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_a : Optional[Any] = None
if self.use_token_type_ids:
_a : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
_a : Any = None
_a : List[Any] = None
_a : Any = None
if self.use_labels:
_a : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_a : Optional[Any] = ids_tensor([self.batch_size] ,2 ).float()
_a : List[Any] = ids_tensor([self.batch_size] ,self.num_choices )
_a : str = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
def __lowercase ( self : Tuple ,_a : Any ,_a : str ,_a : str ,_a : List[Any] ,_a : Optional[Any] ,_a : Tuple ,_a : List[Any] ,_a : Optional[Any] ,_a : List[Any] ,):
'''simple docstring'''
_a : List[str] = XLMModel(config=_a )
model.to(_a )
model.eval()
_a : Union[str, Any] = model(_a ,lengths=_a ,langs=_a )
_a : List[str] = model(_a ,langs=_a )
_a : Any = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : Any ,_a : Any ,_a : int ,_a : str ,_a : Dict ,_a : Union[str, Any] ,_a : Optional[int] ,_a : List[str] ,_a : Dict ,_a : str ,):
'''simple docstring'''
_a : Union[str, Any] = XLMWithLMHeadModel(_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase ( self : List[Any] ,_a : Tuple ,_a : List[str] ,_a : Optional[int] ,_a : Optional[Any] ,_a : Any ,_a : int ,_a : Dict ,_a : Optional[int] ,_a : Union[str, Any] ,):
'''simple docstring'''
_a : str = XLMForQuestionAnsweringSimple(_a )
model.to(_a )
model.eval()
_a : int = model(_a )
_a : int = model(_a ,start_positions=_a ,end_positions=_a )
_a : Optional[int] = outputs
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __lowercase ( self : Optional[int] ,_a : Optional[Any] ,_a : Optional[int] ,_a : Any ,_a : Tuple ,_a : Optional[int] ,_a : Union[str, Any] ,_a : Union[str, Any] ,_a : str ,_a : List[Any] ,):
'''simple docstring'''
_a : Tuple = XLMForQuestionAnswering(_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(_a )
_a : Tuple = model(
_a ,start_positions=_a ,end_positions=_a ,cls_index=_a ,is_impossible=_a ,p_mask=_a ,)
_a : Tuple = model(
_a ,start_positions=_a ,end_positions=_a ,cls_index=_a ,is_impossible=_a ,)
(_a ) : str = result_with_labels.to_tuple()
_a : int = model(_a ,start_positions=_a ,end_positions=_a )
(_a ) : int = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape ,() )
self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) )
def __lowercase ( self : List[str] ,_a : List[str] ,_a : List[str] ,_a : Union[str, Any] ,_a : Dict ,_a : Optional[Any] ,_a : Any ,_a : Union[str, Any] ,_a : Tuple ,_a : str ,):
'''simple docstring'''
_a : Dict = XLMForSequenceClassification(_a )
model.to(_a )
model.eval()
_a : Any = model(_a )
_a : Optional[int] = model(_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowercase ( self : str ,_a : Optional[Any] ,_a : int ,_a : Optional[int] ,_a : Dict ,_a : int ,_a : int ,_a : Optional[int] ,_a : Optional[Any] ,_a : Union[str, Any] ,):
'''simple docstring'''
_a : List[str] = self.num_labels
_a : Optional[int] = XLMForTokenClassification(_a )
model.to(_a )
model.eval()
_a : Tuple = model(_a ,attention_mask=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __lowercase ( self : Any ,_a : List[Any] ,_a : Tuple ,_a : List[Any] ,_a : Optional[Any] ,_a : List[str] ,_a : Any ,_a : Optional[int] ,_a : int ,_a : Tuple ,):
'''simple docstring'''
_a : Tuple = self.num_choices
_a : Optional[Any] = XLMForMultipleChoice(config=_a )
model.to(_a )
model.eval()
_a : Tuple = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_a : int = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_a : List[Any] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_a : Optional[int] = model(
_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def __lowercase ( self : Any ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            'feature-extraction': XLMModel,
            'fill-mask': XLMWithLMHeadModel,
            'question-answering': XLMForQuestionAnsweringSimple,
            'text-classification': XLMForSequenceClassification,
            'text-generation': XLMWithLMHeadModel,
            'token-classification': XLMForTokenClassification,
            'zero-shot': XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
def __lowercase ( self : int ,_a : str ,_a : Union[str, Any] ,_a : Optional[int] ,_a : Any ,_a : Any ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __lowercase ( self : Union[str, Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Optional[Any]=False ):
'''simple docstring'''
_a : Union[str, Any] = super()._prepare_for_class(_a ,_a ,return_labels=_a )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
_a : Tuple = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_a )
_a : Any = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_a )
return inputs_dict
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Dict = XLMModelTester(self )
_a : Union[str, Any] = ConfigTester(self ,config_class=_a ,emb_dim=37 )
def __lowercase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_a )
def __lowercase ( self : str ):
'''simple docstring'''
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_a )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_a )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_a )
def __lowercase ( self : Tuple ,_a : Tuple ,_a : Dict ,_a : Union[str, Any] ,_a : List[Any] ,_a : Dict ,_a : List[str]=False ,_a : str=1 ):
'''simple docstring'''
self.assertIsInstance(_a ,_a )
self.assertListEqual(
[isinstance(_a ,_a ) for iter_attentions in attentions] ,[True] * len(_a ) )
self.assertEqual(len(_a ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_a ):
# adds PAD dummy token
_a : Optional[Any] = min_length + idx + 1
_a : Optional[Any] = min_length + idx + 1
_a : List[Any] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(_a ) )
def __lowercase ( self : Any ,_a : int ,_a : int ,_a : List[str] ,_a : Tuple ,_a : Union[str, Any] ,_a : Optional[int]=False ,_a : List[Any]=1 ):
'''simple docstring'''
self.assertIsInstance(_a ,_a )
self.assertListEqual(
[isinstance(_a ,_a ) for iter_hidden_states in hidden_states] ,[True] * len(_a ) ,)
self.assertEqual(len(_a ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_a ):
# adds PAD dummy token
_a : Dict = min_length + idx + 1
_a : int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(_a ) ,)
pass
@slow
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Union[str, Any] = XLMModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self : Any ):
'''simple docstring'''
        model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
        model.to(torch_device )
        input_ids = torch.tensor([[14, 447]] ,dtype=torch.long ,device=torch_device )  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids ,do_sample=False )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,expected_output_ids )
| 358 |
'''simple docstring'''
from functools import lru_cache
@lru_cache
def factorial(num: int ) -> int:
    """simple docstring"""
    if num < 0:
        raise ValueError('Number should not be negative.' )
    return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
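# Usage sketch: the lru_cache memoizes the recursion, so repeated calls reuse
# previously computed results.
# assert factorial(5) == 120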
| 5 | 0 |
'''simple docstring'''
import math
def proth (number : int ):
    """simple docstring"""
    if not isinstance(number , int ):
        _a : Tuple = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(_a )
    if number < 1:
        _a : Any = f"""Input value of [number={number}] must be > 0"""
        raise ValueError(_a )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3 , 2 ) ) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    for number in range(1_1):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f'''ValueError: there is no {number}th Proth number''')
            continue
        print(f'''The {number}th Proth number: {value}''')
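# Illustration (added): the first Proth numbers (k * 2**n + 1 with odd k < 2**n)
# are 3, 5, 9, 13, 17, ..., so this sketch checks the generator's 1-based indexing.
def _demo_proth():
    assert [proth(n ) for n in range(1 , 6 )] == [3, 5, 9, 13, 17]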
| 359 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    """debug""": logging.DEBUG,
    """info""": logging.INFO,
    """warning""": logging.WARNING,
    """error""": logging.ERROR,
    """critical""": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level ():
    """simple docstring"""
    env_level_str = os.getenv('TRANSFORMERS_VERBOSITY' , None )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
f"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
return _default_log_level
def _get_library_name ():
    """simple docstring"""
    return __name__.split('.' )[0]
def _get_library_root_logger ():
    """simple docstring"""
    return logging.getLogger(_get_library_name() )
def _configure_library_root_logger ():
    """simple docstring"""
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        library_root_logger.propagate = False
def _reset_library_root_logger ():
    """simple docstring"""
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        _default_handler = None
def get_log_levels_dict ():
    """simple docstring"""
    return log_levels
def get_logger (__a : Optional[str] = None ):
    """simple docstring"""
    if __a is None:
        __a = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(__a )
def get_verbosity ():
    """simple docstring"""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity (__a : int ):
    """simple docstring"""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(__a )
def set_verbosity_info ():
    """simple docstring"""
    return set_verbosity(INFO )
def set_verbosity_warning ():
    """simple docstring"""
    return set_verbosity(WARNING )
def set_verbosity_debug ():
    """simple docstring"""
    return set_verbosity(DEBUG )
def set_verbosity_error ():
    """simple docstring"""
    return set_verbosity(ERROR )
def disable_default_handler ():
    """simple docstring"""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler )
def enable_default_handler ():
    """simple docstring"""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler )
def add_handler (__a : logging.Handler ):
    """simple docstring"""
    _configure_library_root_logger()
    assert __a is not None
    _get_library_root_logger().addHandler(__a )
def remove_handler (__a : logging.Handler ):
    """simple docstring"""
    _configure_library_root_logger()
    assert __a is not None and __a not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(__a )
def disable_propagation ():
    """simple docstring"""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation ():
    """simple docstring"""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format ():
    """simple docstring"""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
        handler.setFormatter(formatter )
def reset_format ():
    """simple docstring"""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None )
def warning_advice (self : Union[str, Any] , *args : Union[str, Any] , **kwargs : Union[str, Any] ):
    """simple docstring"""
    no_advisory_warnings = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , False )
    if no_advisory_warnings:
        return
    self.warning(*args , **kwargs )
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None )
def warning_once (self : int , *args : Optional[Any] , **kwargs : Any ):
    """simple docstring"""
    self.warning(*args , **kwargs )
logging.Logger.warning_once = warning_once
class EmptyTqdm :
    """simple docstring"""
    def __init__( self : Any ,*args : Tuple ,**kwargs : int ):  # pylint: disable=unused-argument
        '''simple docstring'''
        self._iterator = args[0] if args else None
    def __iter__( self : str ):
        '''simple docstring'''
        return iter(self._iterator )
    def __getattr__( self : List[Any] ,name : int ):
        '''simple docstring'''
        def empty_fn(*args : Optional[Any] ,**kwargs : Any ):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__( self : List[str] ):
        '''simple docstring'''
        return self
    def __exit__( self : List[str] ,exc_type : str ,exc_val : List[Any] ,exc_tb : str ):
        '''simple docstring'''
        return
class _tqdm_cls :
    """simple docstring"""
    def __call__( self : Union[str, Any] ,*args : Tuple ,**kwargs : Tuple ):
        '''simple docstring'''
        if _tqdm_active:
            return tqdm_lib.tqdm(*args ,**kwargs )
        else:
            return EmptyTqdm(*args ,**kwargs )
    def set_lock ( self : str ,*args : List[Any] ,**kwargs : Any ):
        '''simple docstring'''
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args ,**kwargs )
    def get_lock ( self : List[str] ):
        '''simple docstring'''
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled ():
    """simple docstring"""
    global _tqdm_active
    return bool(_tqdm_active )
def enable_progress_bars ():
    """simple docstring"""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bars ():
    """simple docstring"""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
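# Illustration (added): the typical consumer pattern for this logging module,
# using the helper names as restored above.
def _demo_logging_usage():
    logger = get_logger('demo' )
    set_verbosity_info()  # library root level becomes logging.INFO
    logger.info('visible at INFO verbosity' )
    set_verbosity_error()  # silence info/warning output again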
| 5 | 0 |
'''simple docstring'''
def actual_power (a : int , b : int ):
    """simple docstring"""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
    else:
        return a * actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
def power (a : int , b : int ):
    """simple docstring"""
    if b < 0:
        return 1 / actual_power(a , b )
    return actual_power(a , b )
if __name__ == "__main__":
print(power(-2, -3))
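# Illustration (added): this is exponentiation by squaring, but because the half
# power is recomputed twice per call instead of being bound to a variable, the
# multiplication count stays linear in b rather than logarithmic.
def _demo_power():
    assert power(2 , 3 ) == 8
    assert power(2 , -1 ) == 0.5  # negative exponents go through 1 / actual_power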
| 360 |
'''simple docstring'''
def UpperCAmelCase_ (equationa : list[int] , equationb : list[int] ):
    """simple docstring"""
    if not len(equationa ) == len(equationb ) == 3:
        raise ValueError('Please enter a valid equation.' )
    if equationa[0] == equationa[1] == equationb[0] == equationb[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.' )
    # Extract the coefficients
    aa, ba, ca = equationa
    ab, bb, cb = equationb
    # Calculate the determinants of the matrices
    determinant = aa * bb - ab * ba
    determinant_x = ca * bb - cb * ba
    determinant_y = aa * cb - ab * ca
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)' )
        else:
            raise ValueError('No solution. (Inconsistent system)' )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
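# Illustration (added): solving 2x + 3y = 8 and x - y = 1 by Cramer's rule gives
# x = det_x / det = -11 / -5 = 2.2 and y = det_y / det = -6 / -5 = 1.2.
def _demo_cramers_rule():
    assert UpperCAmelCase_([2, 3, 8] , [1, -1, 1] ) == (2.2, 1.2)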
| 5 | 0 |
'''simple docstring'''
import math
import qiskit
def quantum_full_adder (input_a : int = 1 , input_b : int = 1 , carry_in : int = 1 ):
    """simple docstring"""
    if (
        isinstance(input_a , str )
        or isinstance(input_b , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('inputs must be integers.' )
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.' )
    if (
        (math.floor(input_a ) != input_a)
        or (math.floor(input_b ) != input_b)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.' )
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.' )
raise ValueError('inputs must be less or equal to 2.' )
    # build registers
    qr = qiskit.QuantumRegister(4 , 'qr' )
    cr = qiskit.ClassicalRegister(2 , 'cr' )
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr )  # measure the last two qbits
    backend = qiskit.Aer.get_backend('aer_simulator' )
    job = qiskit.execute(quantum_circuit , backend , shots=1_0_0_0 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
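# Illustration (added, requires a working qiskit Aer install): classical inputs
# 1, 1, 1 add up to 3 = 0b11, so every one of the 1000 shots should measure '11'
# (qubit 2 carries the sum bit, qubit 3 the carry-out).
def _demo_full_adder():
    counts = quantum_full_adder(1 , 1 , 1 )
    assert counts.get('11' ) == 1_0_0_0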
| 361 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester :
    """simple docstring"""
    def __init__( self : int ,parent : List[str] ,batch_size : Optional[Any]=13 ,image_size : str=30 ,patch_size : str=2 ,num_channels : Union[str, Any]=3 ,is_training : Optional[Any]=True ,use_labels : int=True ,hidden_size : Union[str, Any]=32 ,num_hidden_layers : List[Any]=5 ,num_attention_heads : Union[str, Any]=4 ,intermediate_size : int=37 ,hidden_act : Any="gelu" ,hidden_dropout_prob : Union[str, Any]=0.1 ,attention_probs_dropout_prob : str=0.1 ,type_sequence_label_size : List[str]=10 ,initializer_range : Dict=0.02 ,scope : Tuple=None ,):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs ( self : Any ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config ( self : Optional[int] ):
        '''simple docstring'''
        return ViTMSNConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,)
    def create_and_check_model ( self : Tuple ,config : Any ,pixel_values : List[Any] ,labels : int ):
        '''simple docstring'''
        model = ViTMSNModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification ( self : List[Any] ,config : str ,pixel_values : Tuple ,labels : Dict ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values ,labels=labels )
        print(f'Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
        print(f'Labels: {labels}' )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common ( self : Any ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping : List[Any] = (
        {'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning : str = False
    test_torchscript : Optional[Any] = False
    test_resize_embeddings : List[str] = False
    test_head_masking : int = False
    def setUp ( self : Optional[int] ):
        '''simple docstring'''
        self.model_tester = ViTMSNModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=ViTMSNConfig ,has_text_modality=False ,hidden_size=37 )
    def test_config ( self : str ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    @unittest.skip(reason='ViTMSN does not use inputs_embeds' )
    def test_inputs_embeds ( self : List[str] ):
        '''simple docstring'''
        pass
    def test_model_common_attributes ( self : Union[str, Any] ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x ,nn.Linear ) )
    def test_forward_signature ( self : Any ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,expected_arg_names )
    def test_model ( self : List[str] ):
        '''simple docstring'''
        _a : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_a )
    def test_for_image_classification ( self : Optional[Any] ):
        '''simple docstring'''
        _a : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_a )
@slow
    def test_model_from_pretrained ( self : int ):
        '''simple docstring'''
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img ():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    @cached_property
    def default_image_processor ( self : Union[str, Any] ):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head ( self : Union[str, Any] ):
        '''simple docstring'''
        torch.manual_seed(2 )
        model = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image ,return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1E-4 ) )
| 5 | 0 |
'''simple docstring'''
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset ( IterableDataset ):
    """simple docstring"""
    def __init__( self : Tuple ,data : Optional[int] ):
        '''simple docstring'''
        self.data = data
def __iter__( self : Any ):
'''simple docstring'''
for element in self.data:
yield element
def create_accelerator (even_batches : Dict=True ):
    """simple docstring"""
    accelerator = Accelerator(even_batches=even_batches )
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def create_dataloader (accelerator : Accelerator , dataset_size : int , batch_size : int , iterable : bool = False ):
    """simple docstring"""
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size ) ) )
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size ) ) )
    dl = DataLoader(dataset , batch_size=batch_size )
    dl = accelerator.prepare(dl )
    return dl
def verify_dataloader_batch_sizes (accelerator : Accelerator , dataset_size : int , batch_size : int , process_0_expected_batch_sizes : List[int] , process_1_expected_batch_sizes : List[int] , ):
    """simple docstring"""
    dl = create_dataloader(accelerator=accelerator , dataset_size=dataset_size , batch_size=batch_size )
    batch_sizes = [len(batch[0] ) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes ():
    """simple docstring"""
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def test_can_disable_even_batches ():
    """simple docstring"""
    accelerator = create_accelerator(even_batches=False )
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def test_can_join_uneven_inputs ():
    """simple docstring"""
    accelerator = create_accelerator(even_batches=False )
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model] ):
        for batch_idx, batch in enumerate(dl ):
            output = ddp_model(batch[0].float() )
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx )
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed (accelerator : Tuple ):
    """simple docstring"""
    with warnings.catch_warnings(record=True ) as w:
        with accelerator.join_uneven_inputs([Mock()] ):
            pass
        assert issubclass(w[-1].category , UserWarning )
        assert "only supported for multi-GPU" in str(w[-1].message )
def test_join_can_override_even_batches ():
    """simple docstring"""
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches )
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    train_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    valid_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    with accelerator.join_uneven_inputs([ddp_model] , even_batches=overridden_even_batches ):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders ():
    """simple docstring"""
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches )
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    create_dataloader(accelerator , dataset_size=3 , batch_size=1 , iterable=True )
    batch_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore' )
        try:
            with accelerator.join_uneven_inputs([ddp_model] , even_batches=overridden_even_batches ):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches ():
    """simple docstring"""
    accelerator = create_accelerator()
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    create_dataloader(accelerator , dataset_size=3 , batch_size=1 , iterable=True )
    with warnings.catch_warnings(record=True ) as w:
        with accelerator.join_uneven_inputs([ddp_model] , even_batches=False ):
            pass
        assert issubclass(w[-1].category , UserWarning )
        assert "only supported for map-style datasets" in str(w[-1].message )
def main ():
    """simple docstring"""
    accelerator = create_accelerator()
    accelerator.print('Test that even_batches variable ensures uniform batches across processes' )
    test_default_ensures_even_batch_sizes()
    accelerator.print('Run tests with even_batches disabled' )
    test_can_disable_even_batches()
    accelerator.print('Test joining uneven inputs' )
    test_can_join_uneven_inputs()
    accelerator.print('Test overriding even_batches when joining uneven inputs' )
    test_join_can_override_even_batches()
    accelerator.print('Test overriding even_batches for mixed dataloader types' )
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders' )
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print('Test join with non DDP distributed raises warning' )
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator )
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
| 362 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats (__a : str = "https://www.worldometers.info/coronavirus" ):
"""simple docstring"""
    soup = BeautifulSoup(requests.get(__a ).text , 'html.parser' )
    keys = soup.findAll('h1' )
    values = soup.findAll('div' , {'class': 'maincounter-number'} )
    keys += soup.findAll('span' , {'class': 'panel-title'} )
    values += soup.findAll('div' , {'class': 'number-table-main'} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 5 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch (tf_checkpoint_path : Any , config_file : str , pytorch_dump_path : List[Any] ):
    """simple docstring"""
    config = LxmertConfig.from_json_file(config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = LxmertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
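# Illustration (added): a typical invocation of this conversion script, with
# placeholder paths --
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert/model.ckpt \
#       --config_file ./lxmert/config.json \
#       --pytorch_dump_path ./lxmert/pytorch_model.bin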
| 363 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
__lowerCAmelCase = """docs/source/en/_toctree.yml"""
def clean_model_doc_toc (model_doc : list ):
    """simple docstring"""
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f"""{duplicate_key} is present several times in the documentation table of content at """
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.' )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s : s["title"].lower() )
def check_model_doc (overwrite : Optional[int]=False ):
    """simple docstring"""
    with open(PATH_TO_TOC , encoding='utf-8' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['sections']
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if 'sections' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['sections']
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc['sections'] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]['sections'] = model_doc
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC , 'w' , encoding='utf-8' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 5 | 0 |
'''simple docstring'''
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("""DataClass""", Any)
DataClassType = NewType("""DataClassType""", Any)
def string_to_bool (v : Any ):
    """simple docstring"""
    if isinstance(v , bool ):
        return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def make_choice_type_function (choices : list ):
    """simple docstring"""
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg : str_to_choice.get(arg , arg )
def UpperCAmelCase_ (*,
    aliases : Union[str, List[str]] = None , help : str = None , default : Any = dataclasses.MISSING , default_factory : Callable[[], Any] = dataclasses.MISSING , metadata : dict = None , **kwargs : str , ):
    """simple docstring"""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata['aliases'] = aliases
    if help is not None:
        metadata['help'] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
class UpperCAmelCase__ ( ArgumentParser ):
"""simple docstring"""
    dataclass_types : Iterable[DataClassType]
    def __init__( self : int ,dataclass_types : Union[DataClassType, Iterable[DataClassType]] ,**kwargs : List[str] ):
        '''simple docstring'''
        if "formatter_class" not in kwargs:
            kwargs['formatter_class'] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )
@staticmethod
    def _parse_dataclass_field ( parser : ArgumentParser ,field : dataclasses.Field ):
        '''simple docstring'''
        field_name = F"""--{field.name}"""
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type ,str ):
            raise RuntimeError(
                'Unresolved type detected, which should have been done with the help of '
                '`typing.get_type_hints` method by default' )
        aliases = kwargs.pop('aliases' ,[] )
        if isinstance(aliases ,str ):
            aliases = [aliases]
        origin_type = getattr(field.type ,'__origin__' ,field.type )
        if origin_type is Union or (hasattr(types ,'UnionType' ) and isinstance(origin_type ,types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
            ):
                raise ValueError(
                    'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
                    ' the argument parser only supports one type per argument.'
                    F""" Problem encountered in field '{field.name}'.""" )
            if type(None ) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type ,'__origin__' ,field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None ,field.type.__args__[1] ) else field.type.__args__[1]
                )
                origin_type = getattr(field.type ,'__origin__' ,field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type ,type ) and issubclass(field.type ,Enum )):
            if origin_type is Literal:
                kwargs['choices'] = field.type.__args__
            else:
                kwargs['choices'] = [x.value for x in field.type]
            kwargs['type'] = make_choice_type_function(kwargs['choices'] )
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            else:
                kwargs['required'] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs )
            # Hack because type=bool in argparse does not behave as we want.
            kwargs['type'] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['default'] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['nargs'] = '?'
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['const'] = True
        elif isclass(origin_type ) and issubclass(origin_type ,list ):
            kwargs['type'] = field.type.__args__[0]
            kwargs['nargs'] = '+'
            if field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['required'] = True
        else:
            kwargs['type'] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            else:
                kwargs['required'] = True
        parser.add_argument(field_name ,*aliases ,**kwargs )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['default'] = False
            parser.add_argument(F"""--no_{field.name}""" ,action='store_false' ,dest=field.name ,**bool_kwargs )
    def _add_dataclass_arguments ( self : List[str] ,dtype : DataClassType ):
        '''simple docstring'''
        if hasattr(dtype ,'_argument_group_name' ):
            parser = self.add_argument_group(dtype._argument_group_name )
        else:
            parser = self
        try:
            type_hints : Dict[str, type] = get_type_hints(dtype )
        except NameError:
            raise RuntimeError(
                F"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
                'removing line of `from __future__ import annotations` which opts in Postponed '
                'Evaluation of Annotations (PEP 563)' )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex ):
                python_version = '.'.join(map(str ,sys.version_info[:3] ) )
                raise RuntimeError(
                    F"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
                    'line of `from __future__ import annotations` which opts in union types as '
                    '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
                    'support Python versions that lower than 3.10, you need to use '
                    '`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
                    '`X | None`.' ) from ex
            raise
        for field in dataclasses.fields(dtype ):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser ,field )
    def parse_args_into_dataclasses ( self : Tuple ,args : Optional[Any]=None ,return_remaining_strings : List[Any]=False ,look_for_args_file : Any=True ,args_filename : Union[str, Any]=None ,args_file_flag : str=None ,):
        '''simple docstring'''
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag ,type=str ,action='append' )
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args )
                cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip('-' ) ,None )
                if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace ,k )
            obj = dtype(**inputs )
            outputs.append(obj )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(namespace )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(F"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
            return (*outputs,)
    def parse_dict ( self : Dict ,args : Dict[str, Any] ,allow_extra_keys : bool = False ):
        '''simple docstring'''
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(F"""Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}""" )
        return tuple(outputs )
    def parse_json_file ( self : Dict ,json_file : str ,allow_extra_keys : bool = False ):
        '''simple docstring'''
        with open(Path(json_file ) ,encoding='utf-8' ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data ,allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
    def parse_yaml_file ( self : Dict ,yaml_file : str ,allow_extra_keys : bool = False ):
        '''simple docstring'''
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) ,allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
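# Illustration (added): minimal end-to-end use of the parser with a throwaway
# dataclass; `parse_dict` bypasses sys.argv entirely, as restored above.
@dataclasses.dataclass
class _DemoArguments:
    learning_rate: float = 3e-4
    do_train: bool = False
def _demo_hf_argument_parser():
    demo_parser = UpperCAmelCase__([_DemoArguments] )
    (demo_args,) = demo_parser.parse_dict({'learning_rate': 0.1, 'do_train': True} )
    assert demo_args.learning_rate == 0.1 and demo_args.do_train is True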
| 364 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian (string_aa : bytes ):
    """simple docstring"""
    if len(string_aa ) != 3_2:
        raise ValueError('Input must be of length 32' )
    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex (i : int ):
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative' )
    hex_rep = format(i , '08x' )[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
    return little_endian_hex
def preprocess (message : bytes ):
    """simple docstring"""
    bit_string = b''
    for char in message:
        bit_string += format(char , '08b' ).encode('utf-8' )
    start_len = format(len(bit_string ) , '064b' ).encode('utf-8' )
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string ) % 5_1_2 != 4_4_8:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
    return bit_string
def get_block_words (bit_string : bytes ):
    """simple docstring"""
    if len(bit_string ) % 5_1_2 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512' )
    for pos in range(0 , len(bit_string ) , 5_1_2 ):
        block = bit_string[pos : pos + 5_1_2]
        block_words = []
        for i in range(0 , 5_1_2 , 3_2 ):
            block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
        yield block_words
def not_aa (i : int ):
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative' )
    i_str = format(i , '032b' )
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )
def sum_aa (a : int , b : int ):
    """simple docstring"""
    return (a + b) % 2**3_2
def left_rotate_aa (i : int , shift : int ):
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative' )
    if shift < 0:
        raise ValueError('Shift must be non-negative' )
    return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def UpperCAmelCase_ (message : bytes ):
    """simple docstring"""
    bit_string = preprocess(message )
    added_consts = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
    # Starting states
    aa = 0x67_45_23_01
    ba = 0xEF_CD_AB_89
    ca = 0x98_BA_DC_FE
    da = 0x10_32_54_76
    shift_amounts = [
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__a ):
_a : Union[str, Any] = aa
_a : List[Any] = ba
_a : List[Any] = ca
_a : Dict = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_a : Optional[int] = d ^ (b & (c ^ d))
_a : Optional[Any] = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_a : Optional[Any] = c ^ (d & (b ^ c))
_a : Dict = (5 * i + 1) % 1_6
elif i <= 4_7:
_a : Optional[Any] = b ^ c ^ d
_a : Dict = (3 * i + 5) % 1_6
else:
_a : int = c ^ (b | not_aa(__a ))
_a : List[str] = (7 * i) % 1_6
_a : Optional[int] = (f + a + added_consts[i] + block_words[g]) % 2**3_2
_a : Union[str, Any] = d
_a : Tuple = c
_a : Optional[int] = b
_a : Union[str, Any] = sum_aa(__a , left_rotate_aa(__a , shift_amounts[i] ) )
# Add hashed chunk to running total
_a : Any = sum_aa(__a , __a )
_a : Dict = sum_aa(__a , __a )
_a : Union[str, Any] = sum_aa(__a , __a )
_a : str = sum_aa(__a , __a )
_a : Optional[Any] = reformat_hex(__a ) + reformat_hex(__a ) + reformat_hex(__a ) + reformat_hex(__a )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
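# Illustration (added): the digest comes back as a little-endian hex bytestring;
# the empty message is the classic MD5 test vector.
def _demo_md5():
    assert UpperCAmelCase_(b'' ) == b'd41d8cd98f00b204e9800998ecf8427e'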
| 5 | 0 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock (tmpdir : Optional[Any] ):
    """simple docstring"""
    locka = FileLock(str(tmpdir / 'foo.lock' ) )
    lockb = FileLock(str(tmpdir / 'foo.lock' ) )
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
        assert time.time() - _start > timeout
def test_long_path (tmpdir : str ):
    """simple docstring"""
    filename = 'a' * 1_0_0_0 + '.lock'
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith('.lock' )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
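# Illustration (added): the same lock class outside of pytest -- two handles on
# one path serialize access, and `Timeout` fires if the holder never releases.
def _demo_filelock():
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        lock = FileLock(os.path.join(tmp , 'demo.lock' ) )
        with lock.acquire(timeout=1 ):
            pass  # the lock file is held for the duration of this block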
| 365 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar (num_diffusion_timesteps : str , max_beta : Dict=0.999 , alpha_transform_type : List[str]="cosine" , ):
    """simple docstring"""
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t : Union[str, Any] ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t : int ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        ta = i / num_diffusion_timesteps
        tb = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(tb ) / alpha_bar_fn(ta ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
class UpperCAmelCase__ ( SchedulerMixin , ConfigMixin ):
    """simple docstring"""
    _compatibles : Optional[Any] = [e.name for e in KarrasDiffusionSchedulers]
    order : Dict = 2
@register_to_config
    def __init__( self : str ,num_train_timesteps : int = 1000 ,beta_start : float = 0.0_0085 ,beta_end : float = 0.012 ,beta_schedule : str = "linear" ,trained_betas : Optional[Union[np.ndarray, List[float]]] = None ,prediction_type : str = "epsilon" ,use_karras_sigmas : Optional[bool] = False ,clip_sample : Optional[bool] = False ,clip_sample_range : float = 1.0 ,timestep_spacing : str = "linspace" ,steps_offset : int = 0 ,):
        '''simple docstring'''
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas ,dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start ,beta_end ,num_train_timesteps ,dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 ,beta_end**0.5 ,num_train_timesteps ,dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps ,alpha_transform_type='cosine' )
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps ,alpha_transform_type='exp' )
        else:
            raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas ,dim=0 )
        # set all values
        self.set_timesteps(num_train_timesteps ,None ,num_train_timesteps )
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep ( self : Any ,timestep : Union[str, Any] ,schedule_timesteps : Optional[Any]=None ):
        '''simple docstring'''
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            pos = 1 if len(indices ) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
@property
    def init_noise_sigma ( self : Optional[Any] ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input ( self : int ,sample : torch.FloatTensor ,timestep : Union[float, torch.FloatTensor] ,):
        '''simple docstring'''
        step_index = self.index_for_timestep(timestep )
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps ( self : Any ,num_inference_steps : int ,device : Union[str, torch.device] = None ,num_train_timesteps : Optional[int] = None ,):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0 ,num_train_timesteps - 1 ,num_inference_steps ,dtype=float )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0 ,num_inference_steps ) * step_ratio).round()[::-1].copy().astype(float )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps ,0 ,-step_ratio )).round().copy().astype(float )
            timesteps -= 1
        else:
            raise ValueError(
                F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        log_sigmas = np.log(sigmas )
        sigmas = np.interp(timesteps ,np.arange(0 ,len(sigmas ) ) ,sigmas )
        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas ,num_inference_steps=self.num_inference_steps )
            timesteps = np.array([self._sigma_to_t(sigma ,log_sigmas ) for sigma in sigmas] )
        sigmas = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
        sigmas = torch.from_numpy(sigmas ).to(device=device )
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
        timesteps = torch.from_numpy(timesteps )
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
        if str(device ).startswith('mps' ):
            # mps does not support float64
            self.timesteps = timesteps.to(device ,dtype=torch.float32 )
        else:
            self.timesteps = timesteps.to(device=device )
        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int )
    def _sigma_to_t ( self : str ,sigma : Dict ,log_sigmas : Dict ):
        '''simple docstring'''
        log_sigma = np.log(sigma )
        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        low_idx = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w ,0 ,1 )
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape )
        return t
    def _convert_to_karras ( self : int ,in_sigmas : torch.FloatTensor ,num_inference_steps : Tuple ):
        '''simple docstring'''
        sigma_min : float = in_sigmas[-1].item()
        sigma_max : float = in_sigmas[0].item()
        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0 ,1 ,num_inference_steps )
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
@property
    def state_in_first_order ( self : Optional[Any] ):
'''simple docstring'''
return self.dt is None
    def step ( self : int ,model_output : Union[torch.FloatTensor, np.ndarray] ,timestep : Union[float, torch.FloatTensor] ,sample : Union[torch.FloatTensor, np.ndarray] ,return_dict : bool = True ,):
        '''simple docstring'''
        step_index = self.index_for_timestep(timestep )
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range ,self.config.clip_sample_range )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise ( self : Optional[int] ,original_samples : torch.FloatTensor ,noise : torch.FloatTensor ,timesteps : torch.FloatTensor ,):
        '''simple docstring'''
        sigmas = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps ):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device ,dtype=torch.float32 )
            timesteps = timesteps.to(original_samples.device ,dtype=torch.float32 )
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device )
            timesteps = timesteps.to(original_samples.device )
        step_indices = [self.index_for_timestep(t ,schedule_timesteps ) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            sigma = sigma.unsqueeze(-1 )
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self : Optional[int] ):
'''simple docstring'''
return self.config.num_train_timesteps
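# Illustration (added): the usual denoising loop against this scheduler; `unet`
# and the initial latent `sample` are placeholders for a real diffusion model.
#     scheduler = UpperCAmelCase__(num_train_timesteps=1000)
#     scheduler.set_timesteps(25, device='cpu')
#     sample = sample * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = unet(model_input, t)
#         sample = scheduler.step(noise_pred, t, sample).prev_sample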
| 5 | 0 |
'''simple docstring'''
import argparse
import struct
import unittest
class SHAaaa :
    """simple docstring"""
    def __init__( self : List[str] ,data : bytes ):
        '''simple docstring'''
        self.data = data
# Initialize hash values
        self.hashes = [
0X6a09_e667,
0Xbb67_ae85,
0X3c6e_f372,
0Xa54f_f53a,
0X510e_527f,
0X9b05_688c,
0X1f83_d9ab,
0X5be0_cd19,
]
# Initialize round constants
        self.round_constants = [
0X428a_2f98,
0X7137_4491,
0Xb5c0_fbcf,
0Xe9b5_dba5,
0X3956_c25b,
0X59f1_11f1,
0X923f_82a4,
0Xab1c_5ed5,
0Xd807_aa98,
0X1283_5b01,
0X2431_85be,
0X550c_7dc3,
0X72be_5d74,
0X80de_b1fe,
0X9bdc_06a7,
0Xc19b_f174,
0Xe49b_69c1,
0Xefbe_4786,
0X0fc1_9dc6,
0X240c_a1cc,
0X2de9_2c6f,
0X4a74_84aa,
0X5cb0_a9dc,
0X76f9_88da,
0X983e_5152,
0Xa831_c66d,
0Xb003_27c8,
0Xbf59_7fc7,
0Xc6e0_0bf3,
0Xd5a7_9147,
0X06ca_6351,
0X1429_2967,
0X27b7_0a85,
0X2e1b_2138,
0X4d2c_6dfc,
0X5338_0d13,
0X650a_7354,
0X766a_0abb,
0X81c2_c92e,
0X9272_2c85,
0Xa2bf_e8a1,
0Xa81a_664b,
0Xc24b_8b70,
0Xc76c_51a3,
0Xd192_e819,
0Xd699_0624,
0Xf40e_3585,
0X106a_a070,
0X19a4_c116,
0X1e37_6c08,
0X2748_774c,
0X34b0_bcb5,
0X391c_0cb3,
0X4ed8_aa4a,
0X5b9c_ca4f,
0X682e_6ff3,
0X748f_82ee,
0X78a5_636f,
0X84c8_7814,
0X8cc7_0208,
0X90be_fffa,
0Xa450_6ceb,
0Xbef9_a3f7,
0Xc671_78f2,
]
        self.preprocessed_data = self.preprocessing(self.data )
        self.final_hash()
    @staticmethod
    def preprocessing ( data : bytes ):
        '''simple docstring'''
        padding = B'\x80' + (B'\x00' * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack('>Q' ,(len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash ( self : Optional[Any] ):
        '''simple docstring'''
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 ,len(self.preprocessed_data ) ,64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L' ,block ) )
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0 ,64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    sa = (
                        self.ror(words[index - 15] ,7 )
                        ^ self.ror(words[index - 15] ,18 )
                        ^ (words[index - 15] >> 3)
                    )
                    sb = (
                        self.ror(words[index - 2] ,17 )
                        ^ self.ror(words[index - 2] ,19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + sa + words[index - 7] + sb
                    ) % 0X1_0000_0000
                # Compression
                sa = self.ror(e ,6 ) ^ self.ror(e ,11 ) ^ self.ror(e ,25 )
                ch = (e & f) ^ ((~e & 0Xffff_ffff) & g)
                tempa = (
                    h + sa + ch + self.round_constants[index] + words[index]
                ) % 0X1_0000_0000
                sb = self.ror(a ,2 ) ^ self.ror(a ,13 ) ^ self.ror(a ,22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                tempb = (sb + maj) % 0X1_0000_0000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + tempa) % 0X1_0000_0000),
                    c,
                    b,
                    a,
                    ((tempa + tempb) % 0X1_0000_0000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0X1_0000_0000)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
def __lowercase ( self : Union[str, Any] ,_a : int ,_a : int ):
'''simple docstring'''
return 0Xffff_ffff & (value << (32 - rotations)) | (value >> rotations)
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Dict ):
'''simple docstring'''
import hashlib
_a : str = bytes('Test String' ,'utf-8' )
self.assertEqual(SHAaaa(_a ).hash ,hashlib.shaaaa(_a ).hexdigest() )
def UpperCAmelCase_ ():
"""simple docstring"""
import doctest
doctest.testmod()
_a : int = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
_a : Optional[Any] = parser.parse_args()
_a : List[str] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
_a : Dict = f.read()
else:
_a : Any = bytes(__a , 'utf-8' )
print(SHAaaa(__a ).hash )
if __name__ == "__main__":
main()
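# Minimal self-contained sketch of the SHA-256 preprocessing rule implemented above:
# append 0x80, zero-pad so the length is 56 mod 64, then append the original bit
# length as an 8-byte big-endian integer. The helper name here is illustrative.
import struct
def sha256_pad(data: bytes) -> bytes:
    padding = b"\x80" + b"\x00" * (63 - (len(data) + 8) % 64)
    big_endian_length = struct.pack(">Q", len(data) * 8)
    return data + padding + big_endian_length
assert len(sha256_pad(b"abc")) % 64 == 0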
| 366 |
'''simple docstring'''
import qiskit
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
_a : Any = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
_a : List[Any] = qiskit.QuantumCircuit(__a , __a )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
_a : Tuple = qiskit.execute(__a , __a , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(__a )
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
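# Hedged classical stand-in for the measurement above: with no gates applied the
# qubit stays in |0>, so every shot lands in the '0' bin, while a Hadamard before
# the measurement would give a roughly 50/50 split. This is an illustrative
# simulation, not the qiskit API.
import random
def sample_counts(p_one: float, shots: int = 1_0_0_0) -> dict:
    counts = {"0": 0, "1": 0}
    for _ in range(shots):
        counts["1" if random.random() < p_one else "0"] += 1
    return counts
assert sample_counts(0.0) == {"0": 1_0_0_0, "1": 0}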
| 5 | 0 |
'''simple docstring'''
__lowerCAmelCase = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 367 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
def __lowercase ( self : str ):
'''simple docstring'''
_a : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_a : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_a : List[str] = 'xvjiarui/stable-diffusion-2-inpainting'
_a, _a : str = FlaxStableDiffusionInpaintPipeline.from_pretrained(_a ,safety_checker=_a )
_a : str = 'Face of a yellow cat, high resolution, sitting on a park bench'
_a : int = jax.random.PRNGKey(0 )
_a : Tuple = 50
_a : Any = jax.device_count()
_a : Dict = num_samples * [prompt]
_a : Optional[Any] = num_samples * [init_image]
_a : str = num_samples * [mask_image]
_a, _a, _a : Optional[Any] = pipeline.prepare_inputs(_a ,_a ,_a )
# shard inputs and rng
_a : Optional[Any] = replicate(_a )
_a : str = jax.random.split(_a ,jax.device_count() )
_a : Dict = shard(_a )
_a : int = shard(_a )
_a : int = shard(_a )
_a : Union[str, Any] = pipeline(
_a ,_a ,_a ,_a ,_a ,_a ,jit=_a )
_a : Union[str, Any] = output.images.reshape(_a ,512 ,512 ,3 )
_a : Union[str, Any] = images[0, 253:256, 253:256, -1]
_a : str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_a : Union[str, Any] = jnp.array(
[0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
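# Hedged sketch of the shard/replicate data layout used above: `shard` splits the
# leading batch axis into (num_devices, batch_per_device, ...) so a pmap-compiled
# pipeline consumes one slice per device. Pure-numpy illustration with made-up shapes.
import numpy as np
def shard_like(batch: np.ndarray, num_devices: int) -> np.ndarray:
    per_device = batch.shape[0] // num_devices
    return batch.reshape(num_devices, per_device, *batch.shape[1:])
assert shard_like(np.zeros((8, 77)), 2).shape == (2, 4, 77)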
| 5 | 0 |
'''simple docstring'''
def UpperCAmelCase_ (__a : Dict ):
_a : Optional[Any] = 1
_a : Optional[Any] = 2
while i * i <= n:
_a : Dict = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def UpperCAmelCase_ ():
_a : Any = 1
_a : Dict = 1
while True:
i += 1
t_num += i
if count_divisors(__a ) > 5_0_0:
break
return t_num
if __name__ == "__main__":
print(solution())
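# The loop above counts divisors via the multiplicative formula: if
# n = p1^a1 * ... * pk^ak then d(n) = (a1 + 1) * ... * (ak + 1). Brute-force
# cross-check on a small triangular number (helper name is illustrative):
def brute_force_divisors(n: int) -> int:
    return sum(1 for k in range(1, n + 1) if n % k == 0)
assert brute_force_divisors(2_8) == 6  # 28 = 2^2 * 7 -> (2 + 1) * (1 + 1)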
| 368 |
'''simple docstring'''
def UpperCAmelCase_ (__a : str , __a : str ):
"""simple docstring"""
_a : int = len(__a ) + 1
_a : List[str] = len(__a ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
_a : Optional[int] = [[0 for i in range(__a )] for j in range(__a )]
# since string of zero length match pattern of zero length
_a : str = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , __a ):
_a : Optional[Any] = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , __a ):
_a : Dict = dp[0][j - 2] if pattern[j - 1] == '*' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , __a ):
for j in range(1 , __a ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
_a : Tuple = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
_a : List[str] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
_a : int = dp[i - 1][j]
else:
_a : Any = 0
else:
_a : Optional[Any] = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
__lowerCAmelCase = """aab"""
__lowerCAmelCase = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
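# Hedged cross-check of the DP semantics above against Python's own `re` module:
# '.' matches any single character and '*' matches zero or more of the preceding
# element, so full-pattern matching should agree with re.fullmatch.
import re
for string, pat, expected in [("aab", "c*a*b", True), ("mississippi", "mis*is*p*.", False)]:
    assert (re.fullmatch(pat, string) is not None) is expected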
| 5 | 0 |
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
__lowerCAmelCase = """src/transformers"""
__lowerCAmelCase = """docs/source/en"""
__lowerCAmelCase = """."""
def UpperCAmelCase_ (__a : List[str] , __a : Dict , __a : Optional[Any] ):
"""simple docstring"""
with open(__a , 'r' , encoding='utf-8' , newline='\n' ) as f:
_a : List[str] = f.readlines()
# Find the start prompt.
_a : Dict = 0
while not lines[start_index].startswith(__a ):
start_index += 1
start_index += 1
_a : List[Any] = start_index
while not lines[end_index].startswith(__a ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
__lowerCAmelCase = """Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
__lowerCAmelCase = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
__lowerCAmelCase = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
__lowerCAmelCase = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
__lowerCAmelCase = direct_transformers_import(TRANSFORMERS_PATH)
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
_a : Tuple = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , __a )
return [m.group(0 ) for m in matches]
def UpperCAmelCase_ (__a : Optional[int] , __a : Optional[Any] ):
"""simple docstring"""
_a : str = 2 if text == '✅' or text == '❌' else len(__a )
_a : List[Any] = (width - text_length) // 2
_a : Optional[Any] = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_a : List[str] = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
_a : Optional[int] = {name: config.replace('Config' , '' ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
_a : List[Any] = collections.defaultdict(__a )
_a : Union[str, Any] = collections.defaultdict(__a )
_a : Optional[Any] = collections.defaultdict(__a )
_a : Optional[int] = collections.defaultdict(__a )
_a : int = collections.defaultdict(__a )
# Let's lookup through all transformers object (once).
for attr_name in dir(__a ):
_a : Tuple = None
if attr_name.endswith('Tokenizer' ):
_a : str = slow_tokenizers
_a : int = attr_name[:-9]
elif attr_name.endswith('TokenizerFast' ):
_a : Union[str, Any] = fast_tokenizers
_a : Union[str, Any] = attr_name[:-1_3]
elif _re_tf_models.match(__a ) is not None:
_a : List[str] = tf_models
_a : Union[str, Any] = _re_tf_models.match(__a ).groups()[0]
elif _re_flax_models.match(__a ) is not None:
_a : Optional[int] = flax_models
_a : Union[str, Any] = _re_flax_models.match(__a ).groups()[0]
elif _re_pt_models.match(__a ) is not None:
_a : Any = pt_models
_a : List[Any] = _re_pt_models.match(__a ).groups()[0]
if lookup_dict is not None:
while len(__a ) > 0:
if attr_name in model_name_to_prefix.values():
_a : int = True
break
# Try again after removing the last word in the name
_a : Optional[Any] = ''.join(camel_case_split(__a )[:-1] )
# Let's build that table!
_a : Tuple = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
_a : List[Any] = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
_a : int = [len(__a ) + 2 for c in columns]
_a : Optional[Any] = max([len(__a ) for name in model_names] ) + 2
# Build the table per se
_a : Any = '|' + '|'.join([_center_text(__a , __a ) for c, w in zip(__a , __a )] ) + '|\n'
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths] ) + "|\n"
_a : Optional[int] = {True: '✅', False: '❌'}
for name in model_names:
_a : Optional[Any] = model_name_to_prefix[name]
_a : str = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(__a , __a ) for l, w in zip(__a , __a )] ) + "|\n"
return table
def UpperCAmelCase_ (__a : List[Any]=False ):
"""simple docstring"""
_a : Union[str, Any] = _find_text_in_file(
filename=os.path.join(__a , 'index.md' ) , start_prompt='<!--This table is updated automatically from the auto modules' , end_prompt='<!-- End table-->' , )
_a : Dict = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(__a , 'index.md' ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
__lowerCAmelCase = parser.parse_args()
check_model_table(args.fix_and_overwrite)
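# Self-contained sketch of the camel-case splitter defined above, under its
# assumed original name, checked on a representative model class name:
import re
def camel_case_split(identifier: str) -> list:
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
assert camel_case_split("TableTransformerModel") == ["Table", "Transformer", "Model"]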
| 369 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = BlenderbotSmallTokenizer
__UpperCAmelCase : Tuple = False
def __lowercase ( self : List[Any] ):
'''simple docstring'''
super().setUp()
_a : List[str] = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
_a : Tuple = dict(zip(_a ,range(len(_a ) ) ) )
_a : List[Any] = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
_a : List[Any] = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
_a : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
_a : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(_a ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(_a ) )
def __lowercase ( self : List[Any] ,**_a : int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**_a )
def __lowercase ( self : Tuple ,_a : int ):
'''simple docstring'''
_a : Optional[Any] = 'adapt act apte'
_a : Dict = 'adapt act apte'
return input_text, output_text
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[int] = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
_a : Union[str, Any] = 'adapt act apte'
_a : Dict = ['adapt', 'act', 'ap@@', 'te']
_a : Tuple = tokenizer.tokenize(_a )
self.assertListEqual(_a ,_a )
_a : List[str] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_a : Dict = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) ,_a )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : str = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [1384]
_a : Union[str, Any] = 'I am a small frog.'
_a : int = tok([src_text] ,padding=_a ,truncation=_a )['input_ids']
_a : str = tok.batch_decode(_a ,skip_special_tokens=_a ,clean_up_tokenization_spaces=_a )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
_a : Union[str, Any] = 'I am a small frog .'
_a : Optional[Any] = '.'
_a : Optional[Any] = tok(_a )['input_ids']
_a : Union[str, Any] = tok(_a )['input_ids']
assert encoded[-1] == encoded_dot[0]
| 5 | 0 |
'''simple docstring'''
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def UpperCAmelCase_ (__a : bool = True , *__a : List[Any] , **__a : List[str] ):
"""simple docstring"""
if not is_tqdm_available():
raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' )
_a : Optional[int] = False
if main_process_only:
_a : Union[str, Any] = PartialState().local_process_index != 0
return _tqdm(*__a , **__a , disable=__a )
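# Hedged sketch of the gating rule above: the bar is disabled on every process
# whose local index is non-zero when main_process_only is set (illustrative names,
# with a plain integer standing in for PartialState().local_process_index):
def should_disable(local_process_index: int, main_process_only: bool = True) -> bool:
    return main_process_only and local_process_index != 0
assert should_disable(0) is False and should_disable(3) is True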
| 370 |
'''simple docstring'''
__lowerCAmelCase = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
__lowerCAmelCase = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : List[str] = 'Morse code here!'
print(__a )
_a : Tuple = encrypt(__a )
print(__a )
_a : str = decrypt(__a )
print(__a )
if __name__ == "__main__":
main()
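# Hedged round-trip sketch of the encode/decode pair above, using a tiny table so
# it runs standalone (the full table is assumed to be MORSE_CODE_DICT):
table = {"S": "...", "O": "---", " ": "/"}
inverse = {value: key for key, value in table.items()}
encoded = " ".join(table[char] for char in "SOS")
assert "".join(inverse[code] for code in encoded.split()) == "SOS"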
| 5 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : Optional[int] ,_a : Union[str, Any] ,_a : Tuple=13 ,_a : Union[str, Any]=7 ,_a : Tuple=True ,_a : int=True ,_a : int=True ,_a : str=True ,_a : Optional[int]=99 ,_a : Union[str, Any]=32 ,_a : List[str]=5 ,_a : List[str]=4 ,_a : Dict=37 ,_a : Any="gelu" ,_a : Dict=0.1 ,_a : List[Any]=0.1 ,_a : List[str]=512 ,_a : str=16 ,_a : Optional[int]=2 ,_a : Optional[Any]=0.02 ,_a : int=False ,_a : Tuple=True ,_a : Dict="None" ,_a : Optional[Any]=3 ,_a : Dict=4 ,_a : Any=None ,):
'''simple docstring'''
_a : int = parent
_a : str = batch_size
_a : Dict = seq_length
_a : Optional[int] = is_training
_a : Union[str, Any] = use_input_mask
_a : Optional[int] = use_token_type_ids
_a : Tuple = use_labels
_a : Optional[int] = vocab_size
_a : str = hidden_size
_a : Dict = num_hidden_layers
_a : Dict = num_attention_heads
_a : List[Any] = intermediate_size
_a : int = hidden_act
_a : Dict = hidden_dropout_prob
_a : str = attention_probs_dropout_prob
_a : List[str] = max_position_embeddings
_a : str = type_vocab_size
_a : Any = type_sequence_label_size
_a : Union[str, Any] = initializer_range
_a : Union[str, Any] = num_labels
_a : List[str] = num_choices
_a : Optional[Any] = relative_attention
_a : Dict = position_biased_input
_a : Optional[int] = pos_att_type
_a : List[Any] = scope
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_a : int = None
if self.use_input_mask:
_a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
_a : Optional[Any] = None
if self.use_token_type_ids:
_a : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_a : Optional[Any] = None
_a : List[Any] = None
_a : List[str] = None
if self.use_labels:
_a : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_a : int = ids_tensor([self.batch_size] ,self.num_choices )
_a : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return DebertaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,relative_attention=self.relative_attention ,position_biased_input=self.position_biased_input ,pos_att_type=self.pos_att_type ,)
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Union[str, Any] = self.get_config()
_a : Optional[int] = 300
return config
def __lowercase ( self : Dict ,_a : str ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) ,[] )
def __lowercase ( self : List[Any] ,_a : Dict ,_a : Optional[Any] ,_a : Any ,_a : int ,_a : Optional[int] ,_a : Dict ,_a : List[Any] ):
'''simple docstring'''
_a : List[str] = DebertaModel(config=_a )
model.to(_a )
model.eval()
_a : Tuple = model(_a ,attention_mask=_a ,token_type_ids=_a )[0]
_a : Optional[int] = model(_a ,token_type_ids=_a )[0]
_a : Tuple = model(_a )[0]
self.parent.assertListEqual(list(sequence_output.size() ) ,[self.batch_size, self.seq_length, self.hidden_size] )
def __lowercase ( self : int ,_a : Optional[int] ,_a : Tuple ,_a : Tuple ,_a : Dict ,_a : Tuple ,_a : str ,_a : Union[str, Any] ):
'''simple docstring'''
_a : Any = DebertaForMaskedLM(config=_a )
model.to(_a )
model.eval()
_a : str = model(_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase ( self : List[str] ,_a : str ,_a : List[str] ,_a : int ,_a : Any ,_a : Tuple ,_a : List[Any] ,_a : int ):
'''simple docstring'''
_a : List[Any] = self.num_labels
_a : List[str] = DebertaForSequenceClassification(_a )
model.to(_a )
model.eval()
_a : str = model(_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a )
self.parent.assertListEqual(list(result.logits.size() ) ,[self.batch_size, self.num_labels] )
self.check_loss_output(_a )
def __lowercase ( self : Any ,_a : Dict ,_a : Optional[int] ,_a : Dict ,_a : List[Any] ,_a : int ,_a : Any ,_a : Optional[Any] ):
'''simple docstring'''
_a : Tuple = self.num_labels
_a : List[str] = DebertaForTokenClassification(config=_a )
model.to(_a )
model.eval()
_a : List[str] = model(_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __lowercase ( self : Tuple ,_a : List[str] ,_a : Tuple ,_a : Tuple ,_a : int ,_a : List[Any] ,_a : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
_a : Dict = DebertaForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
_a : int = model(
_a ,attention_mask=_a ,token_type_ids=_a ,start_positions=_a ,end_positions=_a ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Any = self.prepare_config_and_inputs()
_a : str = config_and_inputs
_a : List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : Union[str, Any] = (
{
'''feature-extraction''': DebertaModel,
'''fill-mask''': DebertaForMaskedLM,
'''question-answering''': DebertaForQuestionAnswering,
'''text-classification''': DebertaForSequenceClassification,
'''token-classification''': DebertaForTokenClassification,
'''zero-shot''': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Dict = True
__UpperCAmelCase : int = False
__UpperCAmelCase : str = False
__UpperCAmelCase : Any = False
__UpperCAmelCase : Union[str, Any] = False
def __lowercase ( self : str ):
'''simple docstring'''
_a : Any = DebertaModelTester(self )
_a : Any = ConfigTester(self ,config_class=_a ,hidden_size=37 )
def __lowercase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_a )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_a )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_a )
@slow
def __lowercase ( self : List[str] ):
'''simple docstring'''
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Dict = DebertaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason='Model not available yet' )
def __lowercase ( self : Tuple ):
'''simple docstring'''
pass
@slow
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[Any] = DebertaModel.from_pretrained('microsoft/deberta-base' )
_a : Optional[int] = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
_a : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_a : Dict = model(_a ,attention_mask=_a )[0]
# compare the actual values for a slice.
_a : int = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,_a ,atol=1E-4 ) ,F"""{output[:, 1:4, 1:4]}""" )
| 371 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase ( cls : Optional[Any] ):
'''simple docstring'''
_a : List[Any] = TOKEN
HfFolder.save_token(_a )
@classmethod
def __lowercase ( cls : List[Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-config' )
except HTTPError:
pass
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Any = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('test-config' ,use_auth_token=self._token )
_a : Optional[Any] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a ,repo_id='test-config' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Dict = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Tuple = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' ,use_auth_token=self._token )
_a : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a ,repo_id='valid_org/test-config-org' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
_a : Optional[Any] = CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map ,{'AutoConfig': 'custom_configuration.CustomConfig'} )
_a : int = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" ,trust_remote_code=_a )
# Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ ,'CustomConfig' )
self.assertEqual(new_config.attribute ,42 )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Optional[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_a : int = c.n_embd + 1 # int
_a : str = c.resid_pdrop + 1.0 # float
_a : Dict = not c.scale_attn_weights # bool
_a : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(_a ,c.n_embd ,'mismatch for key: n_embd' )
self.assertEqual(_a ,c.resid_pdrop ,'mismatch for key: resid_pdrop' )
self.assertEqual(_a ,c.scale_attn_weights ,'mismatch for key: scale_attn_weights' )
self.assertEqual(_a ,c.summary_type ,'mismatch for key: summary_type' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : int = PretrainedConfig()
_a : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
_a ,['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
_a : Dict = [key for key, value in config_common_kwargs.items() if value == getattr(_a ,_a )]
if len(_a ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F""" {', '.join(_a )}.""" )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaises(_a ):
# config is in subfolder, the following should not work without specifying the subfolder
_a : List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_a : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ,subfolder='bert' )
self.assertIsNotNone(_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : List[Any] = mock.Mock()
_a : Any = 500
_a : Any = {}
_a : Any = HTTPError
_a : List[Any] = {}
# Download this model to make sure it's in the cache.
_a : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' ,return_value=_a ) as mock_head:
_a : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This checks that we did call the fake head request
mock_head.assert_called()
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = AutoConfig.from_pretrained('bert-base-cased' )
_a : List[str] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_a )
_a : str = 2
json.dump(configuration.to_dict() ,open(os.path.join(_a ,'config.4.0.0.json' ) ,'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_a : int = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size ,2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_a : Tuple = ['config.42.0.0.json']
_a : int = 768
configuration.save_pretrained(_a )
shutil.move(os.path.join(_a ,'config.4.0.0.json' ) ,os.path.join(_a ,'config.42.0.0.json' ) )
_a : int = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size ,768 )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Tuple = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
_a : Optional[int] = 'v4.0.0'
_a, _a : Tuple = new_transformers.models.auto.AutoConfig.from_pretrained(
_a ,return_unused_kwargs=_a )
self.assertEqual(new_configuration.hidden_size ,2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_a ,{} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
_a : str = 'v3.0.0'
_a : Optional[Any] = old_transformers.models.auto.AutoConfig.from_pretrained(_a )
self.assertEqual(old_configuration.hidden_size ,768 )
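# Self-contained sketch of the `key=value` update-string format exercised above
# (illustrative parser, not the transformers implementation of update_from_string):
def parse_update_string(update_str: str) -> dict:
    updates = {}
    for pair in update_str.split(","):
        key, value = pair.split("=", 1)
        updates[key.strip()] = value.strip()
    return updates
assert parse_update_string("n_embd=769,scale_attn_weights=False")["n_embd"] == "769"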
| 5 | 0 |
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Optional[int] ,_a : List[str] ,_a : Optional[Any]=99 ,_a : Tuple=13 ,_a : Any=7 ,_a : List[Any]=9 ,_a : str=True ,_a : Optional[Any]=True ,_a : List[str]=False ,_a : Optional[Any]=32 ,_a : Dict=5 ,_a : Optional[int]=4 ,_a : Optional[Any]=37 ,_a : List[Any]=8 ,_a : Optional[int]=0.1 ,_a : List[str]=0.002 ,_a : int=1 ,_a : Dict=0 ,_a : List[str]=0 ,_a : Dict=None ,_a : Dict=None ,):
'''simple docstring'''
_a : str = parent
_a : Dict = batch_size
_a : Optional[int] = encoder_seq_length
_a : List[str] = decoder_seq_length
# For common tests
_a : Tuple = self.decoder_seq_length
_a : Dict = is_training
_a : Optional[Any] = use_attention_mask
_a : Dict = use_labels
_a : Optional[Any] = vocab_size
_a : int = hidden_size
_a : List[str] = num_hidden_layers
_a : Optional[int] = num_attention_heads
_a : Optional[int] = d_ff
_a : List[str] = relative_attention_num_buckets
_a : List[str] = dropout_rate
_a : Dict = initializer_factor
_a : int = eos_token_id
_a : Union[str, Any] = pad_token_id
_a : Tuple = decoder_start_token_id
_a : int = None
_a : Union[str, Any] = decoder_layers
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return TaConfig.from_pretrained('google/umt5-base' )
def __lowercase ( self : str ,_a : Union[str, Any] ,_a : int ,_a : Any ,_a : str=None ,_a : List[str]=None ,_a : str=None ,_a : Tuple=None ,_a : Tuple=None ,):
'''simple docstring'''
if attention_mask is None:
_a : Tuple = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_a : Any = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_a : Tuple = torch.ones(config.num_hidden_layers ,config.num_attention_heads ,device=_a )
if decoder_head_mask is None:
_a : Tuple = torch.ones(config.num_decoder_layers ,config.num_attention_heads ,device=_a )
if cross_attn_head_mask is None:
_a : Tuple = torch.ones(
config.num_decoder_layers ,config.num_attention_heads ,device=_a )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Any = ids_tensor([self.batch_size, self.encoder_seq_length] ,self.vocab_size )
_a : Any = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
_a : Any = input_ids.clamp(self.pad_token_id + 1 )
_a : Any = decoder_input_ids.clamp(self.pad_token_id + 1 )
_a : int = self.get_config()
_a : List[str] = config.num_attention_heads
_a : List[Any] = self.prepare_inputs_dict(_a ,_a ,_a )
return config, input_dict
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : Dict = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowercase ( self : Any ):
'''simple docstring'''
return TaConfig(
vocab_size=166 ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def __lowercase ( self : Dict ):
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def __lowercase ( self : List[Any] ,_a : Dict ,_a : List[str] ,_a : Optional[int] ,_a : Optional[int] ,_a : Union[str, Any] ,_a : int ,):
'''simple docstring'''
_a : Union[str, Any] = UMTaModel(config=_a )
model.to(_a )
model.eval()
_a : Tuple = model(
input_ids=_a ,decoder_input_ids=_a ,attention_mask=_a ,decoder_attention_mask=_a ,)
_a : List[str] = model(input_ids=_a ,decoder_input_ids=_a )
_a : int = result.last_hidden_state
_a : Dict = result.past_key_values
_a : List[Any] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() ,(self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() ,(self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_a ) ,config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) ,4 )
def __lowercase ( self : str ,_a : int ,_a : Dict ,_a : Union[str, Any] ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Tuple ,):
'''simple docstring'''
_a : Any = UMTaModel(config=_a ).get_decoder().to(_a ).eval()
# first forward pass
_a : Any = model(_a ,use_cache=_a )
_a : Optional[Any] = model(_a )
_a : Dict = model(_a ,use_cache=_a )
self.parent.assertTrue(len(_a ) == len(_a ) )
self.parent.assertTrue(len(_a ) == len(_a ) + 1 )
_a : List[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_a : List[str] = ids_tensor((self.batch_size, 1) ,config.vocab_size )
# append the newly sampled tokens to input_ids
_a : List[str] = torch.cat([input_ids, next_tokens] ,dim=-1 )
_a : Tuple = model(_a )['last_hidden_state']
_a : Tuple = model(_a ,past_key_values=_a )['last_hidden_state']
# select random slice
_a : Tuple = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
_a : Optional[Any] = output_from_no_past[:, -1, random_slice_idx].detach()
_a : Dict = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a ,_a ,atol=1E-3 ) )
def __lowercase ( self : Optional[Any] ,_a : int ,_a : str ,):
'''simple docstring'''
_a : Optional[Any] = UMTaModel(config=_a ).to(_a ).half().eval()
_a : List[str] = model(**_a )['last_hidden_state']
self.parent.assertFalse(torch.isnan(_a ).any().item() )
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : int = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
__UpperCAmelCase : List[Any] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
__UpperCAmelCase : List[Any] = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : str = True
__UpperCAmelCase : int = False
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : int = True
# The small UMT5 model needs higher percentages for CPU/MP tests
__UpperCAmelCase : List[Any] = [0.8, 0.9]
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[int] = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
_a : str = UMTaModel(config_and_inputs[0] ).to(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_a ,(config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) ,F"""{tmpdirname}/t5_test.onnx""" ,export_params=_a ,opset_version=9 ,input_names=['input_ids', 'decoder_input_ids'] ,)
@unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : List[str] = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
_a : Optional[int] = config_and_inputs[0]
_a : Optional[int] = UMTaForConditionalGeneration(_a ).eval()
model.to(_a )
_a : Tuple = {
'head_mask': torch.zeros(config.num_layers ,config.num_heads ,device=_a ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=_a ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=_a ),
}
for attn_name, (name, mask) in zip(_a ,head_masking.items() ):
_a : Optional[Any] = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_a : Union[str, Any] = torch.ones(
config.num_decoder_layers ,config.num_heads ,device=_a )
_a : Dict = model.generate(
config_and_inputs[1]['input_ids'] ,num_beams=1 ,max_length=3 ,output_attentions=_a ,return_dict_in_generate=_a ,**_a ,)
# We check the state of decoder_attentions and cross_attentions just from the last step
_a : Any = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) ,0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Any = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' ,return_dict=_a ).to(_a )
_a : Optional[Any] = AutoTokenizer.from_pretrained('google/umt5-small' ,use_fast=_a ,legacy=_a )
_a : Any = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
_a : List[str] = tokenizer(_a ,return_tensors='pt' ,padding=_a ).input_ids
# fmt: off
_a : Dict = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(_a ,_a )
_a : Optional[int] = model.generate(input_ids.to(_a ) )
_a : str = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
_a : Dict = tokenizer.batch_decode(_a )
self.assertEqual(_a ,_a )
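# Hedged sketch of the head-masking invariant checked above: scaling attention
# probabilities by an all-zero head mask must zero out the whole attention map.
# Shapes below are illustrative (batch, heads, query, key).
import torch
attention_probs = torch.softmax(torch.randn(2, 4, 5, 5), dim=-1)
head_mask = torch.zeros(4).view(1, 4, 1, 1)
assert (attention_probs * head_mask).sum().item() == 0.0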
| 350 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
__lowerCAmelCase = {
"""169M""": 1_2,
"""430M""": 2_4,
"""1B5""": 2_4,
"""3B""": 3_2,
"""7B""": 3_2,
"""14B""": 4_0,
}
__lowerCAmelCase = {
"""169M""": 7_6_8,
"""430M""": 1_0_2_4,
"""1B5""": 2_0_4_8,
"""3B""": 2_5_6_0,
"""7B""": 4_0_9_6,
"""14B""": 5_1_2_0,
}
def UpperCAmelCase_ (__a : Dict ):
"""simple docstring"""
_a : List[Any] = list(state_dict.keys() )
for name in state_dict_keys:
_a : List[Any] = state_dict.pop(__a )
# emb -> embedding
if name.startswith('emb.' ):
_a : List[str] = name.replace('emb.' , 'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
_a : Dict = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
_a : int = re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , __a )
# ffn -> feed_forward
_a : str = re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , __a )
# time_mix_k -> time_mix_key and reshape
if name.endswith('.time_mix_k' ):
_a : Any = name.replace('.time_mix_k' , '.time_mix_key' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('.time_mix_v' ):
_a : int = name.replace('.time_mix_v' , '.time_mix_value' )
# time_mix_r -> time_mix_receptance and reshape
if name.endswith('.time_mix_r' ):
_a : Tuple = name.replace('.time_mix_r' , '.time_mix_receptance' )
if name != "head.weight":
_a : Tuple = 'rwkv.' + name
_a : List[Any] = weight
return state_dict
def UpperCAmelCase_ (__a : Tuple , __a : Union[str, Any] , __a : List[str] , __a : str=None , __a : List[str]=None , __a : int=False , __a : int=None ):
"""simple docstring"""
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
_a : List[Any] = 5_0_2_7_7
_a : Optional[Any] = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
_a : Optional[Any] = PreTrainedTokenizerFast(tokenizer_file=__a )
_a : List[Any] = len(__a )
tokenizer.save_pretrained(__a )
# 2. Build the config
_a : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_a : str = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
_a : str = RwkvConfig(
vocab_size=__a , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__a )
# 3. Download model file then convert state_dict
_a : Tuple = hf_hub_download(__a , __a )
_a : Optional[int] = torch.load(__a , map_location='cpu' )
_a : Dict = convert_state_dict(__a )
# 4. Split in shards and save
_a, _a : List[Any] = shard_checkpoint(__a )
for shard_file, shard in shards.items():
torch.save(__a , os.path.join(__a , __a ) )
if index is not None:
_a : Dict = os.path.join(__a , __a )
# Save the index as well
with open(__a , 'w' , encoding='utf-8' ) as f:
_a : List[Any] = json.dumps(__a , indent=2 , sort_keys=__a ) + '\n'
f.write(__a )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
'Cleaning up shards. This may fail with an OOM error; if this is the case, don\'t worry, you still have converted the model.' )
_a : List[Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_a : Optional[Any] = torch.load(os.path.join(__a , __a ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__a , __a ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
_a : List[str] = AutoModelForCausalLM.from_pretrained(__a )
model.push_to_hub(__a , max_shard_size='2GB' )
tokenizer.push_to_hub(__a )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
__lowerCAmelCase = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
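# Hedged sketch of the key-renaming rules implemented above, checked on a couple
# of representative RWKV checkpoint keys (helper name is illustrative):
import re
def rename_key(name: str) -> str:
    if name.startswith("emb."):
        name = name.replace("emb.", "embeddings.")
    name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
    name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
    if name != "head.weight":
        name = "rwkv." + name
    return name
assert rename_key("blocks.3.att.key.weight") == "rwkv.blocks.3.attention.key.weight"
assert rename_key("head.weight") == "head.weight"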
| 5 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : List[str] = ProphetNetTokenizer
__UpperCAmelCase : List[Any] = False
def __lowercase ( self : Tuple ):
'''simple docstring'''
super().setUp()
_a : Dict = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_a : Dict = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __lowercase ( self : Dict ,_a : Union[str, Any] ):
'''simple docstring'''
_a : Any = 'UNwant\u00E9d,running'
_a : Any = 'unwanted, running'
return input_text, output_text
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Any = self.tokenizer_class(self.vocab_file )
_a : str = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_a ,['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) ,[9, 6, 7, 12, 10, 11] )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : str = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) ,['ah', '\u535A', '\u63A8', 'zz'] )
def __lowercase ( self : str ):
'''simple docstring'''
_a : int = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) ,['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def __lowercase ( self : str ):
'''simple docstring'''
_a : List[Any] = BasicTokenizer(do_lower_case=_a ,strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['h\u00E9llo'] )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[Any] = BasicTokenizer(do_lower_case=_a ,strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : Any = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Tuple = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Dict = BasicTokenizer(do_lower_case=_a ,strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Union[str, Any] = BasicTokenizer(do_lower_case=_a ,strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : List[Any] = BasicTokenizer(do_lower_case=_a ,never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Dict = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
_a : List[str] = {}
for i, token in enumerate(_a ):
_a : List[str] = i
_a : Any = WordpieceTokenizer(vocab=_a ,unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) ,[] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) ,['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) ,['[UNK]', 'runn', '##ing'] )
@require_torch
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Union[str, Any] = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
_a : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_a : str = [1037, 2146, 2_0423, 2005, 7680, 7849, 3989, 1012, 102]
_a : Union[str, Any] = tokenizer(_a ,padding=_a ,return_tensors='pt' )
self.assertIsInstance(_a ,_a )
_a : Optional[Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_a ,_a )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
def __lowercase ( self : int ):
'''simple docstring'''
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def __lowercase ( self : List[str] ):
'''simple docstring'''
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def __lowercase ( self : str ):
'''simple docstring'''
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Dict = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
_a : Tuple = tokenizer.encode('sequence builders' ,add_special_tokens=_a )
_a : Union[str, Any] = tokenizer.encode('multi-sequence build' ,add_special_tokens=_a )
_a : int = tokenizer.build_inputs_with_special_tokens(_a )
_a : str = tokenizer.build_inputs_with_special_tokens(_a ,_a )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
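# The slow test above pins down ProphetNet's special-token layout: a single
# sequence gets one trailing [SEP] (id 102), and a pair is simply both
# sequences, each followed by [SEP]. A minimal sketch of the same check:
from transformers import ProphetNetTokenizer
tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased')
ids_a = tokenizer.encode('sequence builders', add_special_tokens=False)
ids_b = tokenizer.encode('multi-sequence build', add_special_tokens=False)
assert tokenizer.build_inputs_with_special_tokens(ids_a) == ids_a + [102]
assert tokenizer.build_inputs_with_special_tokens(ids_a, ids_b) == ids_a + [102] + ids_b + [102]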
| 351 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
__lowerCAmelCase = datasets.logging.get_logger(__name__)
__lowerCAmelCase = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
__lowerCAmelCase = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
__lowerCAmelCase = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://unbabel.github.io/COMET/html/index.html' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'sources': datasets.Value('string' ,id='sequence' ),
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/Unbabel/COMET'] ,reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
        'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf',
] ,)
def __lowercase ( self : int ,_a : int ):
'''simple docstring'''
if self.config_name == "default":
_a : List[Any] = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) )
else:
_a : List[str] = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def __lowercase ( self : Tuple ,_a : List[Any] ,_a : Dict ,_a : Optional[Any] ,_a : List[str]=None ,_a : Tuple=False ):
'''simple docstring'''
if gpus is None:
_a : str = 1 if torch.cuda.is_available() else 0
_a : Optional[Any] = {'src': sources, 'mt': predictions, 'ref': references}
_a : Optional[Any] = [dict(zip(_a ,_a ) ) for t in zip(*data.values() )]
_a, _a : Tuple = self.scorer.predict(_a ,gpus=_a ,progress_bar=_a )
return {"mean_score": mean_score, "scores": scores}
| 5 | 0 |
'''simple docstring'''
import os
# Precomputes a list of the first 100 triangular numbers
__lowerCAmelCase = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]
def UpperCAmelCase_ ():
    """simple docstring"""
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    word_file_path = os.path.join(script_dir , 'words.txt' )
    words = ''
    with open(word_file_path ) as f:
        words = f.readline()
    words = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
    words = [
        word
        for word in [sum(ord(x ) - 6_4 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words )
if __name__ == "__main__":
print(solution())
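# A worked instance of the check above, using the example from the problem
# statement: the word value of "SKY" is 19 + 11 + 25 = 55, which is the 10th
# triangular number, so "SKY" is a triangle word.
value = sum(ord(x) - 6_4 for x in 'SKY')
assert value == 5_5 == 1_0 * 1_1 // 2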
| 352 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 | 0 |
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
__lowerCAmelCase = 1_0
def UpperCAmelCase_ (__a : int , __a : int , __a : list[int] , __a : int ):
"""simple docstring"""
for i in range(__a , __a ):
if array[i] == target:
return i
return -1
def UpperCAmelCase_ (__a : list[int] , __a : int ):
"""simple docstring"""
_a : Union[str, Any] = 0
_a : List[Any] = len(__a )
while left <= right:
if right - left < precision:
return lin_search(__a , __a , __a , __a )
_a : Tuple = (left + right) // 3 + 1
_a : str = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
_a : Optional[int] = one_third - 1
elif array[two_third] < target:
_a : Tuple = two_third + 1
else:
_a : Optional[Any] = one_third + 1
_a : Any = two_third - 1
else:
return -1
def UpperCAmelCase_ (__a : int , __a : int , __a : list[int] , __a : int ):
"""simple docstring"""
if left < right:
if right - left < precision:
return lin_search(__a , __a , __a , __a )
_a : List[str] = (left + right) // 3 + 1
_a : int = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(__a , one_third - 1 , __a , __a )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , __a , __a , __a )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , __a , __a )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase = input("""Enter numbers separated by comma:\n""").strip()
__lowerCAmelCase = [int(item.strip()) for item in user_input.split(""",""")]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
__lowerCAmelCase = int(input("""Enter the number to be found in the list:\n""").strip())
__lowerCAmelCase = ite_ternary_search(collection, target)
__lowerCAmelCase = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f'''Iterative search: {target} found at positions: {resulta}''')
print(f'''Recursive search: {target} found at positions: {resulta}''')
else:
print("""Not found""")
| 353 |
'''simple docstring'''
import sys
def UpperCAmelCase_ (__a : List[str] ):
"""simple docstring"""
_a : List[str] = len(__a )
_a : Dict = [[0 for x in range(__a )] for x in range(__a )]
_a : Union[str, Any] = [[0 for x in range(__a )] for x in range(__a )]
for chain_length in range(2 , __a ):
for a in range(1 , n - chain_length + 1 ):
_a : Tuple = a + chain_length - 1
_a : Any = sys.maxsize
for c in range(__a , __a ):
_a : Optional[Any] = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
_a : Dict = cost
_a : Any = c
return matrix, sol
def UpperCAmelCase_ (__a : Tuple , __a : List[str] , __a : Dict ):
"""simple docstring"""
if i == j:
print('A' + str(__a ) , end=' ' )
else:
print('(' , end=' ' )
        print_optimal_solution(__a , __a , optimal_solution[i][j] )
        print_optimal_solution(__a , optimal_solution[i][j] + 1 , __a )
print(')' , end=' ' )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = [3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5]
_a : Any = len(__a )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
_a, _a : Union[str, Any] = matrix_chain_order(__a )
    print('No. of operations required: ' + str(matrix[1][n - 1] ) )
    print_optimal_solution(__a , 1 , n - 1 )
if __name__ == "__main__":
main()
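# The recurrence above charges array[a - 1] * array[c] * array[b] for the final
# product, i.e. multiplying a (p x q) matrix by a (q x r) matrix costs p * q * r
# scalar multiplications. For the dimensions in main() the DP reaches the
# classic optimum of 15125 operations with the split ((A1(A2A3))((A4A5)A6)).
p, q, r = 3_0, 3_5, 1_5
assert p * q * r == 1_5_7_5_0  # cost of computing A1 @ A2 alone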
| 5 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Any = """▁"""
__lowerCAmelCase : Tuple = {"""vocab_file""": """sentencepiece.bpe.model"""}
__lowerCAmelCase : int = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
__lowerCAmelCase : List[Any] = {
"""facebook/xglm-564M""": 2_0_4_8,
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Any = VOCAB_FILES_NAMES
__UpperCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Dict = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[Any] ,_a : Optional[int] ,_a : Dict="<s>" ,_a : Optional[int]="</s>" ,_a : Optional[int]="</s>" ,_a : Any="<s>" ,_a : str="<unk>" ,_a : Tuple="<pad>" ,_a : Optional[Dict[str, Any]] = None ,**_a : Optional[Any] ,):
'''simple docstring'''
_a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
_a : Dict = 7
_a : Optional[int] = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
_a : List[str] = kwargs.get('additional_special_tokens' ,[] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
_a : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
_a : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_a : Optional[int] = 1
# Mimic fairseq token-to-id alignment for the first 4 token
_a : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
_a : Optional[int] = len(self.sp_model )
_a : Dict = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(_a )
_a : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Dict ):
'''simple docstring'''
_a : str = self.__dict__.copy()
_a : Any = None
_a : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[Any] ,_a : Tuple ):
'''simple docstring'''
_a : List[str] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_a : Union[str, Any] = {}
_a : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowercase ( self : List[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
_a : int = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def __lowercase ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a ))
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a ))
def __lowercase ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
_a : Optional[Any] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : Optional[Any] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowercase ( self : Optional[Any] ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(_a ,out_type=_a )
def __lowercase ( self : Union[str, Any] ,_a : Optional[Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_a : Union[str, Any] = self.sp_model.PieceToId(_a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowercase ( self : List[Any] ,_a : int ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowercase ( self : int ,_a : List[str] ):
'''simple docstring'''
_a : Any = ''.join(_a ).replace(_a ,' ' ).strip()
return out_string
def __lowercase ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : Dict = os.path.join(
_a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,'wb' ) as fi:
_a : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
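# The id alignment documented above reduces to shifting every SentencePiece id
# by one and pinning the four control tokens; a minimal sketch, assuming a
# loaded `sp_model` (hypothetical variable) for the XGLM vocabulary:
fairseq_offset = 1
fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
def token_to_id(token):
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = sp_model.PieceToId(token)  # e.g. ',' -> 3 in the spm vocab
    # PieceToId returns 0 for unknown pieces, which must map to '<unk>'
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids['<unk>']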
| 354 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCAmelCase_ (tmpdir ):
    """simple docstring"""
    locka = FileLock(str(tmpdir / 'foo.lock' ) )
    lockb = FileLock(str(tmpdir / 'foo.lock' ) )
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
        assert time.time() - _start > timeout
def UpperCAmelCase_ (tmpdir ):
    """simple docstring"""
    filename = 'a' * 1_0_0_0 + '.lock'
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith('.lock' )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
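# The behaviour both tests rely on, outside of pytest: a second FileLock on the
# same path cannot be acquired while the first is held, and a finite timeout
# turns the wait into a Timeout exception instead of a hang.
lock_a = FileLock('/tmp/demo.lock')
lock_b = FileLock('/tmp/demo.lock')
with lock_a.acquire():
    try:
        lock_b.acquire(timeout=0.01)
    except Timeout:
        print('second acquire timed out while the first lock was held')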
| 5 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class UpperCAmelCase__ ( lowercase__ , lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Dict = '''focalnet'''
def __init__( self : List[Any] ,_a : Optional[Any]=224 ,_a : int=4 ,_a : Union[str, Any]=3 ,_a : List[str]=96 ,_a : Union[str, Any]=False ,_a : str=[192, 384, 768, 768] ,_a : Dict=[2, 2, 6, 2] ,_a : Any=[2, 2, 2, 2] ,_a : str=[3, 3, 3, 3] ,_a : Optional[int]="gelu" ,_a : Tuple=4.0 ,_a : str=0.0 ,_a : Union[str, Any]=0.1 ,_a : str=False ,_a : int=1E-4 ,_a : Dict=False ,_a : Optional[Any]=False ,_a : Optional[int]=False ,_a : str=0.02 ,_a : Optional[Any]=1E-5 ,_a : List[str]=32 ,_a : Tuple=None ,_a : Any=None ,**_a : Optional[Any] ,):
'''simple docstring'''
super().__init__(**_a )
_a : List[str] = image_size
_a : Optional[Any] = patch_size
_a : List[str] = num_channels
_a : List[str] = embed_dim
_a : Optional[int] = use_conv_embed
_a : Optional[Any] = hidden_sizes
_a : Dict = depths
_a : int = focal_levels
_a : Optional[int] = focal_windows
_a : List[str] = hidden_act
_a : List[Any] = mlp_ratio
_a : Optional[int] = hidden_dropout_prob
_a : Optional[int] = drop_path_rate
_a : str = use_layerscale
_a : Any = layerscale_value
_a : Tuple = use_post_layernorm
_a : List[str] = use_post_layernorm_in_modulation
_a : Optional[int] = normalize_modulator
_a : Optional[int] = initializer_range
_a : Any = layer_norm_eps
_a : Optional[int] = encoder_stride
_a : List[Any] = ['stem'] + [F"""stage{idx}""" for idx in range(1 ,len(self.depths ) + 1 )]
_a : Dict = get_aligned_output_features_output_indices(
out_features=_a ,out_indices=_a ,stage_names=self.stage_names )
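# With the default depths of (2, 2, 6, 2) the config exposes five stage names
# that backbone consumers can select from; a small sketch, assuming the class
# above is exported as FocalNetConfig:
from transformers import FocalNetConfig
config = FocalNetConfig(out_features=['stage2', 'stage4'])
assert config.stage_names == ['stem', 'stage1', 'stage2', 'stage3', 'stage4']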
| 355 |
'''simple docstring'''
def UpperCAmelCase_ (__a : int = 1_0**1_2 ):
"""simple docstring"""
_a : List[str] = 1
_a : Optional[int] = 0
_a : Any = 1
_a : List[str] = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(f'''{solution() = }''')
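# Reading the four assignments above as prev_numerator, prev_denominator,
# numerator and denominator in order, the loop walks successive solutions of
# the Pell-like recurrence behind P(two blue) = 1/2; its first step already
# reproduces the textbook case of 3 blue discs in a box of 4.
prev_numerator, prev_denominator, numerator, denominator = 1, 0, 1, 1
prev_numerator += 2 * numerator      # 3
numerator += 2 * prev_numerator      # 7  -> total discs: (7 + 1) // 2 == 4
prev_denominator += 2 * denominator  # 2
denominator += 2 * prev_denominator  # 5  -> blue discs: (5 + 1) // 2 == 3
assert 2 * (3 * 2) == 4 * 3  # P = (3/4) * (2/3) = 1/2, checked in integers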
| 5 | 0 |
'''simple docstring'''
import os
def UpperCAmelCase_ (__a : str = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(__a ) , __a ) ) as input_file:
_a : List[str] = [
[int(__a ) for element in line.split(',' )]
for line in input_file.readlines()
]
_a : List[str] = len(__a )
_a : Union[str, Any] = len(matrix[0] )
_a : str = [[-1 for _ in range(__a )] for _ in range(__a )]
for i in range(__a ):
_a : Optional[Any] = matrix[i][0]
for j in range(1 , __a ):
for i in range(__a ):
_a : int = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __a ):
_a : Optional[int] = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
_a : Optional[int] = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
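# The three passes above (seed the left column, sweep each new column top-down
# and bottom-up, take the best right-column value) compute the left-to-right
# minimal path sum of Project Euler 82. The 5x5 example from the problem
# statement is a handy sanity check; its minimal path sums to 994:
example = [
    [131, 673, 234, 103, 18],
    [201, 96, 342, 965, 150],
    [630, 803, 746, 422, 111],
    [537, 699, 497, 121, 956],
    [805, 732, 524, 37, 331],
]
# minimal path: 201 + 96 + 342 + 234 + 103 + 18 == 994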
| 356 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCAmelCase = {
"""vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""},
"""tokenizer_file""": {
"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"""
},
}
__lowerCAmelCase = {"""mobilebert-uncased""": 5_1_2}
__lowerCAmelCase = {}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Any = VOCAB_FILES_NAMES
__UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Tuple = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Optional[Any] = MobileBertTokenizer
def __init__( self : Dict ,_a : List[Any]=None ,_a : Optional[Any]=None ,_a : Union[str, Any]=True ,_a : Dict="[UNK]" ,_a : Union[str, Any]="[SEP]" ,_a : Any="[PAD]" ,_a : Optional[int]="[CLS]" ,_a : Optional[Any]="[MASK]" ,_a : Dict=True ,_a : Any=None ,**_a : Optional[Any] ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
_a : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,_a ) != do_lower_case
or normalizer_state.get('strip_accents' ,_a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,_a ) != tokenize_chinese_chars
):
_a : Optional[Any] = getattr(_a ,normalizer_state.pop('type' ) )
_a : Dict = do_lower_case
_a : str = strip_accents
_a : Tuple = tokenize_chinese_chars
_a : Optional[Any] = normalizer_class(**_a )
_a : str = do_lower_case
def __lowercase ( self : Tuple ,_a : Union[str, Any] ,_a : List[str]=None ):
'''simple docstring'''
_a : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowercase ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
_a : List[str] = [self.sep_token_id]
_a : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowercase ( self : Union[str, Any] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
_a : int = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
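# How the two helpers above combine for a sentence pair: the first wraps the
# ids as [CLS] A [SEP] B [SEP], and the second marks the A segment (including
# [CLS] and its [SEP]) with 0s and the B segment with 1s. A minimal sketch:
from transformers import MobileBertTokenizerFast
tokenizer = MobileBertTokenizerFast.from_pretrained('google/mobilebert-uncased')
encoded = tokenizer('hello there', 'general kenobi')
# encoded.token_type_ids is all 0s over "[CLS] hello there [SEP]"
# and all 1s over "general kenobi [SEP]"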
| 5 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 357 |
'''simple docstring'''
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : List[Any] = 0
# if input_string is "aba" than new_input_string become "a|b|a"
_a : Optional[int] = ''
_a : List[str] = ''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(__a ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
    # we will store the start and end of the previously furthest-ending
    # palindromic substring
_a, _a : Optional[int] = 0, 0
# length[i] shows the length of palindromic substring with center i
_a : Optional[Any] = [1 for i in range(len(__a ) )]
    # for each character in new_input_string find the corresponding palindromic string
_a : Dict = 0
for j in range(len(__a ) ):
_a : Dict = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(__a )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_a : Optional[int] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update r to the last index of this palindrome
if j + k - 1 > r:
_a : str = j - k + 1 # noqa: E741
_a : Any = j + k - 1
# update max_length and start position
if max_length < length[j]:
_a : Union[str, Any] = length[j]
_a : List[str] = j
# create that string
_a : Tuple = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
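# The "|" interleaving above is what lets a single scan handle even- and
# odd-length palindromes at once: the transformed string always has odd length
# 2n - 1, so every palindrome in it has a single centre character.
s = 'abba'
transformed = '|'.join(s)  # 'a|b|b|a' -- the same shape the loop above builds
assert len(transformed) == 2 * len(s) - 1 and len(transformed) % 2 == 1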
| 5 | 0 |
'''simple docstring'''
def UpperCAmelCase_ (__a : str , __a : str ):
"""simple docstring"""
_a : str = len(__a )
_a : Union[str, Any] = len(__a )
_a : int = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
_a : List[str] = True
for i in range(__a ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
_a : List[Any] = True
if a[i].islower():
_a : Dict = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
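# What the DP above decides: can string a be turned into string b by
# upper-casing some of its lowercase letters and deleting the remaining
# lowercase ones? For example 'daBcd' matches 'ABC' (drop the first 'd',
# upper-case 'a' and 'c', keep 'B', drop the last 'd'), while 'dBcd' does not,
# since no 'A' can be produced before the 'B'.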
| 358 |
'''simple docstring'''
from functools import lru_cache
@lru_cache
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if num < 0:
raise ValueError('Number should not be negative.' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Optional[Any] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
_a : List[str] = [1_4_4, 1_9_2, 2_4_0]
_a : int = [1_6, 3_2, 6_4, 9_6, 1_2_8, 1_6_0, 6_4_0]
elif "mobilevit_xs" in mobilevit_name:
_a : Optional[Any] = [9_6, 1_2_0, 1_4_4]
_a : int = [1_6, 3_2, 4_8, 6_4, 8_0, 9_6, 3_8_4]
elif "mobilevit_xxs" in mobilevit_name:
_a : Any = [6_4, 8_0, 9_6]
_a : List[Any] = [1_6, 1_6, 2_4, 4_8, 6_4, 8_0, 3_2_0]
_a : List[str] = 0.05
_a : Any = 2.0
if mobilevit_name.startswith('deeplabv3_' ):
_a : Union[str, Any] = 5_1_2
_a : Any = 1_6
_a : Optional[Any] = 2_1
_a : Optional[Any] = 'pascal-voc-id2label.json'
else:
_a : Tuple = 1_0_0_0
_a : Optional[int] = 'imagenet-1k-id2label.json'
_a : Any = 'huggingface/label-files'
_a : Dict = json.load(open(hf_hub_download(__a , __a , repo_type='dataset' ) , 'r' ) )
_a : List[str] = {int(__a ): v for k, v in idalabel.items()}
_a : List[str] = idalabel
_a : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase_ (__a : str , __a : str=False ):
"""simple docstring"""
for i in range(1 , 6 ):
if f"""layer_{i}.""" in name:
_a : Any = name.replace(f"""layer_{i}.""" , f"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
_a : Any = name.replace('conv_1.' , 'conv_stem.' )
if ".block." in name:
_a : Any = name.replace('.block.' , '.' )
if "exp_1x1" in name:
_a : Tuple = name.replace('exp_1x1' , 'expand_1x1' )
if "red_1x1" in name:
_a : List[str] = name.replace('red_1x1' , 'reduce_1x1' )
if ".local_rep.conv_3x3." in name:
_a : Optional[int] = name.replace('.local_rep.conv_3x3.' , '.conv_kxk.' )
if ".local_rep.conv_1x1." in name:
_a : Tuple = name.replace('.local_rep.conv_1x1.' , '.conv_1x1.' )
if ".norm." in name:
_a : Optional[Any] = name.replace('.norm.' , '.normalization.' )
if ".conv." in name:
_a : Union[str, Any] = name.replace('.conv.' , '.convolution.' )
if ".conv_proj." in name:
_a : Tuple = name.replace('.conv_proj.' , '.conv_projection.' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if f""".{i}.{j}.""" in name:
_a : List[Any] = name.replace(f""".{i}.{j}.""" , f""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if f""".{i}.{j}.""" in name:
_a : Tuple = name.replace(f""".{i}.{j}.""" , f""".{i}.""" )
if "expand_1x1" in name:
_a : List[str] = name.replace('expand_1x1' , 'downsampling_layer.expand_1x1' )
if "conv_3x3" in name:
_a : List[str] = name.replace('conv_3x3' , 'downsampling_layer.conv_3x3' )
if "reduce_1x1" in name:
_a : str = name.replace('reduce_1x1' , 'downsampling_layer.reduce_1x1' )
for i in range(2 , 5 ):
if f""".global_rep.{i}.weight""" in name:
_a : Optional[int] = name.replace(f""".global_rep.{i}.weight""" , '.layernorm.weight' )
if f""".global_rep.{i}.bias""" in name:
_a : int = name.replace(f""".global_rep.{i}.bias""" , '.layernorm.bias' )
if ".global_rep." in name:
_a : int = name.replace('.global_rep.' , '.transformer.' )
if ".pre_norm_mha.0." in name:
_a : Optional[int] = name.replace('.pre_norm_mha.0.' , '.layernorm_before.' )
if ".pre_norm_mha.1.out_proj." in name:
_a : str = name.replace('.pre_norm_mha.1.out_proj.' , '.attention.output.dense.' )
if ".pre_norm_ffn.0." in name:
_a : List[Any] = name.replace('.pre_norm_ffn.0.' , '.layernorm_after.' )
if ".pre_norm_ffn.1." in name:
_a : Dict = name.replace('.pre_norm_ffn.1.' , '.intermediate.dense.' )
if ".pre_norm_ffn.4." in name:
_a : List[Any] = name.replace('.pre_norm_ffn.4.' , '.output.dense.' )
if ".transformer." in name:
_a : List[str] = name.replace('.transformer.' , '.transformer.layer.' )
if ".aspp_layer." in name:
_a : Any = name.replace('.aspp_layer.' , '.' )
if ".aspp_pool." in name:
_a : List[Any] = name.replace('.aspp_pool.' , '.' )
if "seg_head." in name:
_a : str = name.replace('seg_head.' , 'segmentation_head.' )
if "segmentation_head.classifier.classifier." in name:
_a : Optional[Any] = name.replace('segmentation_head.classifier.classifier.' , 'segmentation_head.classifier.' )
if "classifier.fc." in name:
_a : Optional[Any] = name.replace('classifier.fc.' , 'classifier.' )
elif (not base_model) and ("segmentation_head." not in name):
_a : Optional[Any] = 'mobilevit.' + name
return name
def UpperCAmelCase_ (__a : List[str] , __a : Optional[Any] , __a : Tuple=False ):
"""simple docstring"""
if base_model:
_a : Tuple = ''
else:
_a : Union[str, Any] = 'mobilevit.'
for key in orig_state_dict.copy().keys():
_a : List[Any] = orig_state_dict.pop(__a )
if key[:8] == "encoder.":
_a : Union[str, Any] = key[8:]
if "qkv" in key:
_a : Any = key.split('.' )
_a : str = int(key_split[0][6:] ) - 1
_a : Any = int(key_split[3] )
_a : Any = model.get_submodule(f"""{model_prefix}encoder.layer.{layer_num}""" )
_a : Union[str, Any] = layer.transformer.layer[transformer_num].attention.attention.all_head_size
_a : List[Any] = (
f"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
_a : Dict = val[:dim, :]
_a : Optional[int] = val[dim : dim * 2, :]
_a : Any = val[-dim:, :]
else:
_a : Any = val[:dim]
_a : List[Any] = val[dim : dim * 2]
_a : Dict = val[-dim:]
else:
_a : Tuple = val
return orig_state_dict
def UpperCAmelCase_ ():
"""simple docstring"""
_a : List[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_a : int = Image.open(requests.get(__a , stream=__a ).raw )
return im
@torch.no_grad()
def UpperCAmelCase_ (__a : Union[str, Any] , __a : Optional[int] , __a : Tuple , __a : str=False ):
"""simple docstring"""
_a : Dict = get_mobilevit_config(__a )
# load original state_dict
_a : Dict = torch.load(__a , map_location='cpu' )
# load 🤗 model
if mobilevit_name.startswith('deeplabv3_' ):
_a : Any = MobileViTForSemanticSegmentation(__a ).eval()
else:
_a : List[str] = MobileViTForImageClassification(__a ).eval()
_a : Union[str, Any] = convert_state_dict(__a , __a )
model.load_state_dict(__a )
# Check outputs on an image, prepared by MobileViTImageProcessor
_a : Optional[int] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 3_2 )
_a : str = image_processor(images=prepare_img() , return_tensors='pt' )
_a : Optional[Any] = model(**__a )
_a : Dict = outputs.logits
if mobilevit_name.startswith('deeplabv3_' ):
assert logits.shape == (1, 2_1, 3_2, 3_2)
if mobilevit_name == "deeplabv3_mobilevit_s":
_a : Optional[int] = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
_a : Union[str, Any] = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
_a : Dict = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(f"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , __a , atol=1e-4 )
else:
assert logits.shape == (1, 1_0_0_0)
if mobilevit_name == "mobilevit_s":
_a : Optional[int] = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
_a : Optional[int] = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
_a : Any = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(f"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , __a , atol=1e-4 )
Path(__a ).mkdir(exist_ok=__a )
print(f"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__a )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__a )
if push_to_hub:
_a : Tuple = {
'mobilevit_s': 'mobilevit-small',
'mobilevit_xs': 'mobilevit-x-small',
'mobilevit_xxs': 'mobilevit-xx-small',
'deeplabv3_mobilevit_s': 'deeplabv3-mobilevit-small',
'deeplabv3_mobilevit_xs': 'deeplabv3-mobilevit-x-small',
'deeplabv3_mobilevit_xxs': 'deeplabv3-mobilevit-xx-small',
}
print('Pushing to the hub...' )
_a : Optional[int] = model_mapping[mobilevit_name]
image_processor.push_to_hub(__a , organization='apple' )
model.push_to_hub(__a , organization='apple' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__lowerCAmelCase = parser.parse_args()
    convert_mobilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 359 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
__lowerCAmelCase = threading.Lock()
__lowerCAmelCase = None
__lowerCAmelCase = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
__lowerCAmelCase = logging.WARNING
__lowerCAmelCase = True
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Dict = os.getenv('TRANSFORMERS_VERBOSITY' , __a )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
f"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
return _default_log_level
def UpperCAmelCase_ ():
"""simple docstring"""
return __name__.split('.' )[0]
def UpperCAmelCase_ ():
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def UpperCAmelCase_ ():
"""simple docstring"""
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_a : str = logging.StreamHandler() # Set sys.stderr as stream.
_a : Optional[Any] = sys.stderr.flush
# Apply our default configuration to the library root logger.
_a : List[Any] = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
_a : List[str] = False
def UpperCAmelCase_ ():
"""simple docstring"""
global _default_handler
with _lock:
if not _default_handler:
return
_a : int = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
_a : str = None
def UpperCAmelCase_ ():
"""simple docstring"""
return log_levels
def UpperCAmelCase_ (__a : Optional[str] = None ):
"""simple docstring"""
if name is None:
_a : List[Any] = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
_configure_library_root_logger()
_get_library_root_logger().setLevel(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def UpperCAmelCase_ (__a : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(__a )
def UpperCAmelCase_ (__a : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
_a : Union[str, Any] = False
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
_a : Dict = True
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = _get_library_root_logger().handlers
for handler in handlers:
_a : Union[str, Any] = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
handler.setFormatter(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Union[str, Any] = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(__a )
def UpperCAmelCase_ (self : Union[str, Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ):
"""simple docstring"""
_a : Union[str, Any] = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , __a )
if no_advisory_warnings:
return
self.warning(*__a , **__a )
__lowerCAmelCase = warning_advice
@functools.lru_cache(__a )
def UpperCAmelCase_ (self : int , *__a : Optional[Any] , **__a : Any ):
"""simple docstring"""
self.warning(*__a , **__a )
__lowerCAmelCase = warning_once
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Any ,*_a : Tuple ,**_a : int ): # pylint: disable=unused-argument
'''simple docstring'''
_a : int = args[0] if args else None
def __iter__( self : str ):
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self : List[Any] ,_a : int ):
'''simple docstring'''
def empty_fn(*_a : Optional[Any] ,**_a : Any ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : List[str] ):
'''simple docstring'''
return self
def __exit__( self : List[str] ,_a : str ,_a : List[Any] ,_a : str ):
'''simple docstring'''
return
class UpperCAmelCase__ :
"""simple docstring"""
def __call__( self : Union[str, Any] ,*_a : Tuple ,**_a : Tuple ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm(*_a ,**_a )
else:
return EmptyTqdm(*_a ,**_a )
def __lowercase ( self : str ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
_a : Any = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_a ,**_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__lowerCAmelCase = _tqdm_cls()
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
_a : str = True
hf_hub_utils.enable_progress_bars()
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
_a : Dict = False
hf_hub_utils.disable_progress_bars()
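# This module mirrors transformers.utils.logging; in library code its public
# helpers are typically used like so:
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
logger = hf_logging.get_logger('transformers')
logger.info('visible, because the library verbosity is now INFO')
logger.debug('suppressed at INFO verbosity')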
| 5 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : int = DanceDiffusionPipeline
__UpperCAmelCase : Optional[int] = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__UpperCAmelCase : Tuple = PipelineTesterMixin.required_optional_params - {
'''callback''',
'''latents''',
'''callback_steps''',
'''output_type''',
'''num_images_per_prompt''',
}
__UpperCAmelCase : Optional[int] = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : Dict = False
def __lowercase ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
_a : Dict = UNetaDModel(
block_out_channels=(32, 32, 64) ,extra_in_channels=16 ,sample_size=512 ,sample_rate=1_6000 ,in_channels=2 ,out_channels=2 ,flip_sin_to_cos=_a ,use_timestep_embedding=_a ,time_embedding_type='fourier' ,mid_block_type='UNetMidBlock1D' ,down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') ,up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') ,)
_a : List[Any] = IPNDMScheduler()
_a : str = {
'unet': unet,
'scheduler': scheduler,
}
return components
def __lowercase ( self : Union[str, Any] ,_a : Dict ,_a : Union[str, Any]=0 ):
'''simple docstring'''
if str(_a ).startswith('mps' ):
_a : List[Any] = torch.manual_seed(_a )
else:
_a : Any = torch.Generator(device=_a ).manual_seed(_a )
_a : str = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
_a : Dict = self.get_dummy_components()
_a : Any = DanceDiffusionPipeline(**_a )
_a : List[str] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : Dict = self.get_dummy_inputs(_a )
_a : List[Any] = pipe(**_a )
_a : Tuple = output.audios
_a : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_a : List[Any] = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def __lowercase ( self : List[str] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def __lowercase ( self : Tuple ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Tuple = torch_device
_a : str = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
_a : Optional[Any] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : int = torch.manual_seed(0 )
_a : Union[str, Any] = pipe(generator=_a ,num_inference_steps=100 ,audio_length_in_s=4.096 )
_a : Tuple = output.audios
_a : Optional[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_a : Any = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : List[str] = torch_device
_a : Union[str, Any] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ,torch_dtype=torch.floataa )
_a : Optional[int] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : List[Any] = torch.manual_seed(0 )
_a : Optional[int] = pipe(generator=_a ,num_inference_steps=100 ,audio_length_in_s=4.096 )
_a : Tuple = output.audios
_a : Union[str, Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_a : Any = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
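# The slow tests above boil down to this inference recipe (needs a GPU box and
# the harmonai/maestro-150k weights):
import torch
from diffusers import DanceDiffusionPipeline
pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k').to('cuda')
output = pipe(generator=torch.manual_seed(0), num_inference_steps=100, audio_length_in_s=4.096)
audio = output.audios[0]  # a stereo waveform of shape (2, sample_count)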
| 360 |
'''simple docstring'''
def UpperCAmelCase_ (__a : list[int] , __a : list[int] ):
"""simple docstring"""
if not len(__a ) == len(__a ) == 3:
raise ValueError('Please enter a valid equation.' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
_a, _a, _a : Tuple = equationa
_a, _a, _a : str = equationa
# Calculate the determinants of the matrices
_a : Union[str, Any] = aa * ba - aa * ba
_a : List[Any] = ca * ba - ca * ba
_a : List[Any] = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
_a : int = determinant_x / determinant
_a : List[str] = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
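# A worked instance with a unique solution, encoding each equation as
# [a, b, c] for ax + by = c: x + y = 3 and x - y = -1 give (x, y) = (1.0, 2.0).
a1, b1, c1 = 1, 1, 3
a2, b2, c2 = 1, -1, -1
determinant = a1 * b2 - a2 * b1    # -2
determinant_x = c1 * b2 - c2 * b1  # -2
determinant_y = a1 * c2 - a2 * c1  # -4
assert (determinant_x / determinant, determinant_y / determinant) == (1.0, 2.0)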
| 5 | 0 |
'''simple docstring'''
def UpperCAmelCase_ (__a : str , __a : str ):
"""simple docstring"""
assert x is not None
assert y is not None
_a : Optional[int] = len(__a )
_a : int = len(__a )
# declaring the array for storing the dp values
_a : Union[str, Any] = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
_a : Union[str, Any] = 1 if x[i - 1] == y[j - 1] else 0
_a : Optional[int] = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
_a : int = ''
_a : Tuple = m, n
while i > 0 and j > 0:
_a : str = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
_a : Union[str, Any] = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
__lowerCAmelCase = """AGGTAB"""
__lowerCAmelCase = """GXTXAYB"""
__lowerCAmelCase = 4
__lowerCAmelCase = """GTAB"""
__lowerCAmelCase , __lowerCAmelCase = longest_common_subsequence(a, b)
print("""len =""", ln, """, sub-sequence =""", subseq)
import doctest
doctest.testmod()
| 361 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : int ,_a : List[str] ,_a : Optional[Any]=13 ,_a : str=30 ,_a : str=2 ,_a : Union[str, Any]=3 ,_a : Optional[Any]=True ,_a : int=True ,_a : Union[str, Any]=32 ,_a : List[Any]=5 ,_a : Union[str, Any]=4 ,_a : int=37 ,_a : Any="gelu" ,_a : Union[str, Any]=0.1 ,_a : str=0.1 ,_a : List[str]=10 ,_a : Dict=0.02 ,_a : Tuple=None ,):
'''simple docstring'''
_a : Any = parent
_a : int = batch_size
_a : List[Any] = image_size
_a : Optional[int] = patch_size
_a : List[str] = num_channels
_a : Dict = is_training
_a : Dict = use_labels
_a : Optional[Any] = hidden_size
_a : str = num_hidden_layers
_a : Optional[int] = num_attention_heads
_a : Dict = intermediate_size
_a : Union[str, Any] = hidden_act
_a : List[str] = hidden_dropout_prob
_a : Any = attention_probs_dropout_prob
_a : List[str] = type_sequence_label_size
_a : int = initializer_range
_a : List[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_a : Union[str, Any] = (image_size // patch_size) ** 2
_a : Tuple = num_patches + 1
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : str = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : List[str] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,)
def __lowercase ( self : Tuple ,_a : Any ,_a : List[Any] ,_a : int ):
'''simple docstring'''
_a : str = ViTMSNModel(config=_a )
model.to(_a )
model.eval()
_a : int = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : List[Any] ,_a : str ,_a : Tuple ,_a : Dict ):
'''simple docstring'''
_a : Tuple = self.type_sequence_label_size
_a : int = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_a : Dict = model(_a ,labels=_a )
        print(F"""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
        print(F"""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_a : int = 1
_a : Optional[Any] = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_a : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = self.prepare_config_and_inputs()
_a, _a, _a : int = config_and_inputs
_a : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__UpperCAmelCase : List[Any] = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : str = False
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : List[str] = ViTMSNModelTester(self )
_a : Optional[int] = ConfigTester(self ,config_class=_a ,has_text_modality=_a ,hidden_size=37 )
def __lowercase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a, _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[Any] = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_a : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a ,nn.Linear ) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a, _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[str] = model_class(_a )
_a : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : List[Any] = [*signature.parameters.keys()]
_a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self : int ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Dict = ViTMSNModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(2 )
_a : List[str] = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(_a )
_a : List[str] = self.default_image_processor
_a : int = prepare_img()
_a : Tuple = image_processor(images=_a ,return_tensors='pt' ).to(_a )
# forward pass
with torch.no_grad():
_a : Optional[int] = model(**_a )
# verify the logits
_a : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_a )
_a : List[Any] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_a ,atol=1E-4 ) )
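# A short inference sketch mirroring the integration test above (network access
# and the `facebook/vit-msn-small` checkpoint are assumed to be available; kept
# as comments so the test module itself is unaffected):
#
#     from PIL import Image
#     from transformers import ViTImageProcessor, ViTMSNForImageClassification
#
#     processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
#     model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")
#     inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#     logits = model(**inputs).logits  # shape (1, 1000), as asserted above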
| 5 | 0 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = ['''image_processor''', '''tokenizer''']
__UpperCAmelCase : Union[str, Any] = '''BlipImageProcessor'''
__UpperCAmelCase : Any = '''AutoTokenizer'''
def __init__( self : Optional[Any] ,_a : Any ,_a : List[str] ,_a : Optional[int] ):
'''simple docstring'''
super().__init__(_a ,_a )
# add QFormer tokenizer
_a : Dict = qformer_tokenizer
def __call__( self : List[str] ,_a : ImageInput = None ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Optional[Any] ,):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You have to specify at least images or text.' )
_a : List[str] = BatchFeature()
if text is not None:
_a : int = self.tokenizer(
text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_token_type_ids=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,)
encoding.update(_a )
_a : str = self.qformer_tokenizer(
text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_token_type_ids=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,)
_a : Optional[Any] = qformer_text_encoding.pop('input_ids' )
_a : Union[str, Any] = qformer_text_encoding.pop('attention_mask' )
if images is not None:
_a : Optional[int] = self.image_processor(_a ,return_tensors=_a )
encoding.update(_a )
return encoding
def __lowercase ( self : str ,*_a : List[Any] ,**_a : str ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_a ,**_a )
def __lowercase ( self : int ,*_a : Tuple ,**_a : Tuple ):
'''simple docstring'''
return self.tokenizer.decode(*_a ,**_a )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Union[str, Any] = self.tokenizer.model_input_names
_a : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def __lowercase ( self : List[str] ,_a : Union[str, Any] ,**_a : Tuple ):
'''simple docstring'''
if os.path.isfile(_a ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(_a ,exist_ok=_a )
_a : List[Any] = os.path.join(_a ,'qformer_tokenizer' )
self.qformer_tokenizer.save_pretrained(_a )
return super().save_pretrained(_a ,**_a )
@classmethod
def __lowercase ( cls : Union[str, Any] ,_a : Optional[int] ,**_a : int ):
'''simple docstring'''
_a : List[str] = AutoTokenizer.from_pretrained(_a ,subfolder='qformer_tokenizer' )
_a : str = cls._get_arguments_from_pretrained(_a ,**_a )
args.append(_a )
return cls(*_a )
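# A usage sketch for the processor above. The checkpoint name is an assumption,
# and in the released InstructBLIP API the popped QFormer tensors are stored
# under `qformer_input_ids` / `qformer_attention_mask`:
#
#     from transformers import InstructBlipProcessor
#
#     processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#     inputs = processor(images=image, text="What is shown?", return_tensors="pt")
#     # -> input_ids / attention_mask (main tokenizer), qformer_input_ids /
#     #    qformer_attention_mask (QFormer tokenizer), pixel_values (image processor)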
| 362 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def UpperCAmelCase_ (__a : str = "https://www.worldometers.info/coronavirus" ):
"""simple docstring"""
_a : List[str] = BeautifulSoup(requests.get(__a ).text , 'html.parser' )
_a : Dict = soup.findAll('h1' )
_a : Union[str, Any] = soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(__a , __a )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 5 | 0 |
'''simple docstring'''
from typing import Any
import numpy as np
def UpperCAmelCase_ (__a : np.ndarray ):
"""simple docstring"""
return np.array_equal(__a , matrix.conjugate().T )
def UpperCAmelCase_ (__a : np.ndarray , __a : np.ndarray ):
"""simple docstring"""
_a : Tuple = v.conjugate().T
_a : int = v_star.dot(__a )
assert isinstance(__a , np.ndarray )
return (v_star_dot.dot(__a )) / (v_star.dot(__a ))
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Tuple = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
_a : Any = np.array([[1], [2], [3]] )
assert is_hermitian(__a ), f"""{a} is not hermitian."""
print(rayleigh_quotient(__a , __a ) )
_a : List[Any] = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(__a ), f"""{a} is not hermitian."""
assert rayleigh_quotient(__a , __a ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
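# A self-contained restatement of the Rayleigh quotient computed above,
# R(M, v) = (v* M v) / (v* v), checked on the same real Hermitian example:
import numpy as np

def rayleigh(m: np.ndarray, v: np.ndarray) -> float:
    v_star = v.conjugate().T
    return ((v_star @ m @ v) / (v_star @ v)).item()

m = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
v = np.array([[1], [2], [3]])
assert np.isclose(rayleigh(m, v), 3.0)  # matches the `float(3)` assertion above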
| 363 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
__lowerCAmelCase = """docs/source/en/_toctree.yml"""
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Any = defaultdict(__a )
for doc in model_doc:
counts[doc["local"]] += 1
_a : List[str] = [key for key, value in counts.items() if value > 1]
_a : str = []
for duplicate_key in duplicates:
_a : Union[str, Any] = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(__a ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
    return sorted(__a , key=lambda s : s["title"].lower() )
def UpperCAmelCase_ (__a : Optional[int]=False ):
"""simple docstring"""
with open(__a , encoding='utf-8' ) as f:
_a : Tuple = yaml.safe_load(f.read() )
# Get to the API doc
_a : Union[str, Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_a : Union[str, Any] = content[api_idx]['sections']
# Then to the model doc
_a : List[str] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
_a : List[str] = api_doc[model_idx]['sections']
_a : List[Any] = [(idx, section) for idx, section in enumerate(__a ) if 'sections' in section]
_a : Tuple = False
for idx, modality_doc in modalities_docs:
_a : List[Any] = modality_doc['sections']
_a : Any = clean_model_doc_toc(__a )
if old_modality_doc != new_modality_doc:
_a : Union[str, Any] = True
if overwrite:
_a : str = new_modality_doc
if diff:
if overwrite:
_a : Dict = model_doc
_a : Dict = api_doc
with open(__a , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(__a , allow_unicode=__a ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
__lowerCAmelCase = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
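# A minimal sketch of the duplicate-detection step inside `clean_model_doc_toc`
# above, run on a hypothetical toy table of contents:
from collections import defaultdict

toy_doc = [
    {"local": "bert", "title": "BERT"},
    {"local": "bert", "title": "BERT"},
    {"local": "gpt2", "title": "GPT-2"},
]
toy_counts = defaultdict(int)
for entry in toy_doc:
    toy_counts[entry["local"]] += 1
assert [key for key, value in toy_counts.items() if value > 1] == ["bert"]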
| 5 | 0 |
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowerCAmelCase = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : List[Any] ,_a : int ,_a : Union[str, Any] ,_a : List[str]=None ,_a : Dict=1 ):
'''simple docstring'''
_a : str = tokenizer
_a : List[Any] = dataset
_a : Dict = len(_a ) if n_tasks is None else n_tasks
_a : Tuple = n_copies
def __iter__( self : int ):
'''simple docstring'''
_a : Optional[Any] = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
_a : str = self.tokenizer(_a ,padding=_a ,return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : Optional[Any] ,_a : List[Any] ,_a : Any ,_a : Optional[int] ):
'''simple docstring'''
_a : Optional[int] = start_length
_a : int = eof_strings
_a : Dict = tokenizer
def __call__( self : Optional[int] ,_a : Tuple ,_a : str ,**_a : List[Any] ):
'''simple docstring'''
_a : int = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
_a : int = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(_a )
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Dict = re.split('(%s)' % '|'.join(__a ) , __a )
# last string should be ""
return "".join(string_list[:-2] )
def UpperCAmelCase_ (__a : Dict , __a : Optional[Any] , __a : Union[str, Any] , __a : int , __a : List[Any] , __a : Optional[Any]=2_0 , **__a : str ):
"""simple docstring"""
_a : Any = defaultdict(__a ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__a ) ):
with torch.no_grad():
_a : str = batch['ids'].shape[-1]
_a : List[Any] = accelerator.unwrap_model(__a ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=__a , **__a )
# each task is generated batch_size times
_a : Optional[Any] = batch['task_id'].repeat(__a )
_a : int = accelerator.pad_across_processes(
__a , dim=1 , pad_index=tokenizer.pad_token_id )
_a : List[str] = accelerator.gather((generated_tokens, generated_tasks) )
_a : Any = generated_tokens.cpu().numpy()
_a : List[str] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__a , __a ):
gen_token_dict[task].append(__a )
_a : str = [[] for _ in range(__a )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
_a : Tuple = tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
code_gens[task].append(remove_last_block(__a ) )
return code_gens
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = HfArgumentParser(__a )
_a : Tuple = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
_a : Optional[int] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
_a : Union[str, Any] = 'false'
if args.num_workers is None:
_a : str = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
_a : Any = Accelerator()
set_seed(args.seed , device_specific=__a )
# Load model and tokenizer
_a : Optional[int] = AutoTokenizer.from_pretrained(args.model_ckpt )
_a : Optional[int] = tokenizer.eos_token
_a : Optional[Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
_a : Dict = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , __a , __a )] ),
}
# Load evaluation dataset and metric
_a : Optional[int] = load_dataset('openai_humaneval' )
_a : Optional[int] = load_metric('code_eval' )
_a : List[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
_a : Union[str, Any] = args.n_samples // args.batch_size
_a : str = TokenizedDataset(__a , human_eval['test'] , n_copies=__a , n_tasks=__a )
# do not confuse args.batch_size, which is actually the num_return_sequences
_a : str = DataLoader(__a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
_a : Tuple = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
_a : str = accelerator.prepare(__a , __a )
_a : Optional[int] = complete_code(
__a , __a , __a , __a , n_tasks=__a , batch_size=args.batch_size , **__a , )
if accelerator.is_main_process:
_a : List[Any] = []
for task in tqdm(range(__a ) ):
_a : Dict = human_eval['test'][task]['test']
_a : Optional[int] = f"""check({human_eval['test'][task]['entry_point']})"""
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
_a : str = code_eval_metric.compute(
references=__a , predictions=__a , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(__a , __a )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 364 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
if len(__a ) != 3_2:
raise ValueError('Input must be of length 32' )
_a : Any = b''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
_a : List[str] = format(__a , '08x' )[-8:]
_a : str = b''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
_a : List[Any] = b''
for char in message:
bit_string += format(__a , '08b' ).encode('utf-8' )
_a : int = format(len(__a ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__a ) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
return bit_string
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
if len(__a ) % 5_1_2 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(__a ) , 5_1_2 ):
_a : List[Any] = bit_string[pos : pos + 5_1_2]
_a : str = []
for i in range(0 , 5_1_2 , 3_2 ):
block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
yield block_words
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
_a : List[str] = format(__a , '032b' )
_a : int = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__a , 2 )
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
return (a + b) % 2**3_2
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
_a : str = preprocess(__a )
_a : Optional[int] = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
# Starting states
_a : int = 0x67_45_23_01
_a : Union[str, Any] = 0xEF_CD_AB_89
_a : str = 0x98_BA_DC_FE
_a : List[Any] = 0x10_32_54_76
    _a : Optional[int] = [
        7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2,
        5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0,
        4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3,
        6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1,
    ]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__a ):
_a : Union[str, Any] = aa
_a : List[Any] = ba
_a : List[Any] = ca
_a : Dict = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_a : Optional[int] = d ^ (b & (c ^ d))
_a : Optional[Any] = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_a : Optional[Any] = c ^ (d & (b ^ c))
_a : Dict = (5 * i + 1) % 1_6
elif i <= 4_7:
_a : Optional[Any] = b ^ c ^ d
_a : Dict = (3 * i + 5) % 1_6
else:
_a : int = c ^ (b | not_aa(__a ))
_a : List[str] = (7 * i) % 1_6
_a : Optional[int] = (f + a + added_consts[i] + block_words[g]) % 2**3_2
_a : Union[str, Any] = d
_a : Tuple = c
_a : Optional[int] = b
_a : Union[str, Any] = sum_aa(__a , left_rotate_aa(__a , shift_amounts[i] ) )
# Add hashed chunk to running total
_a : Any = sum_aa(__a , __a )
_a : Dict = sum_aa(__a , __a )
_a : Union[str, Any] = sum_aa(__a , __a )
_a : str = sum_aa(__a , __a )
_a : Optional[Any] = reformat_hex(__a ) + reformat_hex(__a ) + reformat_hex(__a ) + reformat_hex(__a )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
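# Reference-check sketch: a correct MD5 implementation of this shape must agree
# with the standard library, e.g. the well-known digest of the empty message:
import hashlib

assert hashlib.md5(b"").hexdigest() == "d41d8cd98f00b204e9800998ecf8427e"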
| 5 | 0 |
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__lowerCAmelCase = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
print("""Googling.....""")
__lowerCAmelCase = f'''https://www.google.com/search?q={query}&num=100'''
__lowerCAmelCase = requests.get(
url,
headers={"""User-Agent""": str(UserAgent().random)},
)
try:
__lowerCAmelCase = (
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """yuRUbf"""})
.find("""a""")
.get("""href""")
)
except AttributeError:
__lowerCAmelCase = parse_qs(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """kCrYT"""})
.find("""a""")
.get("""href""")
)["""url"""][0]
webbrowser.open(link)
| 365 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def UpperCAmelCase_ (__a : str , __a : Dict=0.999 , __a : List[str]="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__a : Union[str, Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__a : int ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
_a : Tuple = []
for i in range(__a ):
_a : Union[str, Any] = i / num_diffusion_timesteps
_a : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__a ) / alpha_bar_fn(__a ) , __a ) )
return torch.tensor(__a , dtype=torch.floataa )
class UpperCAmelCase__ ( lowercase__ , lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : Dict = 2
@register_to_config
def __init__( self : str ,_a : int = 1000 ,_a : float = 0.0_0085 ,_a : float = 0.012 ,_a : str = "linear" ,_a : Optional[Union[np.ndarray, List[float]]] = None ,_a : str = "epsilon" ,_a : Optional[bool] = False ,_a : Optional[bool] = False ,_a : float = 1.0 ,_a : str = "linspace" ,_a : int = 0 ,):
'''simple docstring'''
if trained_betas is not None:
_a : List[str] = torch.tensor(_a ,dtype=torch.floataa )
elif beta_schedule == "linear":
_a : Tuple = torch.linspace(_a ,_a ,_a ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a : List[str] = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,_a ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a : Dict = betas_for_alpha_bar(_a ,alpha_transform_type='cosine' )
elif beta_schedule == "exp":
_a : Tuple = betas_for_alpha_bar(_a ,alpha_transform_type='exp' )
else:
            raise NotImplementedError(F"""{beta_schedule} is not implemented for {self.__class__}""" )
_a : Optional[Any] = 1.0 - self.betas
_a : Optional[int] = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(_a ,_a ,_a )
_a : Optional[int] = use_karras_sigmas
def __lowercase ( self : Any ,_a : Union[str, Any] ,_a : Optional[Any]=None ):
'''simple docstring'''
if schedule_timesteps is None:
_a : List[Any] = self.timesteps
_a : Dict = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_a : int = 1 if len(_a ) > 1 else 0
else:
_a : str = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
_a : str = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowercase ( self : int ,_a : torch.FloatTensor ,_a : Union[float, torch.FloatTensor] ,):
'''simple docstring'''
_a : List[Any] = self.index_for_timestep(_a )
_a : Tuple = self.sigmas[step_index]
_a : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowercase ( self : Any ,_a : int ,_a : Union[str, torch.device] = None ,_a : Optional[int] = None ,):
'''simple docstring'''
_a : Optional[Any] = num_inference_steps
_a : Dict = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_a : Optional[Any] = np.linspace(0 ,num_train_timesteps - 1 ,_a ,dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_a : str = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
_a : int = (np.arange(0 ,_a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_a : Any = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
_a : Union[str, Any] = (np.arange(_a ,0 ,-step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_a : Tuple = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_a : Union[str, Any] = np.log(_a )
_a : str = np.interp(_a ,np.arange(0 ,len(_a ) ) ,_a )
if self.config.use_karras_sigmas:
_a : List[Any] = self._convert_to_karras(in_sigmas=_a ,num_inference_steps=self.num_inference_steps )
_a : Dict = np.array([self._sigma_to_t(_a ,_a ) for sigma in sigmas] )
_a : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_a : Union[str, Any] = torch.from_numpy(_a ).to(device=_a )
_a : Any = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
_a : List[Any] = torch.from_numpy(_a )
_a : List[str] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(_a ).startswith('mps' ):
# mps does not support float64
_a : Tuple = timesteps.to(_a ,dtype=torch.floataa )
else:
_a : Dict = timesteps.to(device=_a )
# empty dt and derivative
_a : Tuple = None
_a : Optional[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_a : Union[str, Any] = defaultdict(_a )
def __lowercase ( self : str ,_a : Dict ,_a : Dict ):
'''simple docstring'''
_a : Optional[int] = np.log(_a )
# get distribution
_a : Union[str, Any] = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
_a : List[Any] = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
_a : Tuple = low_idx + 1
_a : Union[str, Any] = log_sigmas[low_idx]
_a : Optional[Any] = log_sigmas[high_idx]
# interpolate sigmas
_a : Optional[Any] = (low - log_sigma) / (low - high)
_a : List[str] = np.clip(_a ,0 ,1 )
# transform interpolation to time range
_a : Union[str, Any] = (1 - w) * low_idx + w * high_idx
_a : List[str] = t.reshape(sigma.shape )
return t
def __lowercase ( self : int ,_a : torch.FloatTensor ,_a : Tuple ):
'''simple docstring'''
_a : float = in_sigmas[-1].item()
_a : float = in_sigmas[0].item()
_a : Tuple = 7.0 # 7.0 is the value used in the paper
_a : str = np.linspace(0 ,1 ,_a )
_a : Optional[Any] = sigma_min ** (1 / rho)
_a : Union[str, Any] = sigma_max ** (1 / rho)
_a : str = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return self.dt is None
def __lowercase ( self : int ,_a : Union[torch.FloatTensor, np.ndarray] ,_a : Union[float, torch.FloatTensor] ,_a : Union[torch.FloatTensor, np.ndarray] ,_a : bool = True ,):
'''simple docstring'''
_a : Union[str, Any] = self.index_for_timestep(_a )
# advance index counter by 1
_a : Any = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_a : Tuple = self.sigmas[step_index]
_a : int = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
_a : List[str] = self.sigmas[step_index - 1]
_a : List[Any] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_a : Optional[int] = 0
_a : Tuple = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_a : Dict = sigma_hat if self.state_in_first_order else sigma_next
_a : Optional[int] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_a : List[Any] = sigma_hat if self.state_in_first_order else sigma_next
_a : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
_a : Union[str, Any] = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
_a : Optional[int] = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_a : Optional[Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_a : Any = sigma_next - sigma_hat
# store for 2nd order step
_a : int = derivative
_a : List[str] = dt
_a : Union[str, Any] = sample
else:
# 2. 2nd order / Heun's method
_a : Dict = (sample - pred_original_sample) / sigma_next
_a : Tuple = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
_a : Optional[Any] = self.dt
_a : Union[str, Any] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
_a : List[Any] = None
_a : Union[str, Any] = None
_a : Dict = None
_a : str = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def __lowercase ( self : Optional[int] ,_a : torch.FloatTensor ,_a : torch.FloatTensor ,_a : torch.FloatTensor ,):
'''simple docstring'''
_a : str = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
_a : Dict = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
_a : Optional[Any] = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
_a : int = self.timesteps.to(original_samples.device )
_a : Optional[Any] = timesteps.to(original_samples.device )
_a : Any = [self.index_for_timestep(_a ,_a ) for t in timesteps]
_a : Optional[int] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_a : Optional[Any] = sigma.unsqueeze(-1 )
_a : Any = original_samples + noise * sigma
return noisy_samples
def __len__( self : Optional[int] ):
'''simple docstring'''
return self.config.num_train_timesteps
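# A self-contained numpy sketch of the Karras et al. sigma schedule that
# `_convert_to_karras` above computes: sigmas ramp from sigma_max down to
# sigma_min along (max^(1/rho) + t * (min^(1/rho) - max^(1/rho)))**rho, rho = 7.
import numpy as np

def karras_sigmas(sigma_min: float, sigma_max: float, n: int, rho: float = 7.0) -> np.ndarray:
    ramp = np.linspace(0, 1, n)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

sigmas = karras_sigmas(0.1, 10.0, 5)
assert np.isclose(sigmas[0], 10.0) and np.isclose(sigmas[-1], 0.1)  # decreasing schedule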
| 5 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__lowerCAmelCase = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 366 |
'''simple docstring'''
import qiskit
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
_a : Any = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
_a : List[Any] = qiskit.QuantumCircuit(__a , __a )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
_a : Tuple = qiskit.execute(__a , __a , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(__a )
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
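# Note: the circuit above applies no gates before measuring, so the qubit stays
# in |0> and the returned histogram should be {'0': 1000} for the 1000 shots.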
| 5 | 0 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = '''vision-encoder-decoder'''
__UpperCAmelCase : Tuple = True
def __init__( self : List[Any] ,**_a : List[Any] ):
'''simple docstring'''
super().__init__(**_a )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F"""A configuraton of type {self.model_type} cannot be instantiated because """
F"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
_a : Tuple = kwargs.pop('encoder' )
_a : int = encoder_config.pop('model_type' )
_a : List[str] = kwargs.pop('decoder' )
_a : Union[str, Any] = decoder_config.pop('model_type' )
_a : Optional[Any] = AutoConfig.for_model(_a ,**_a )
_a : List[Any] = AutoConfig.for_model(_a ,**_a )
_a : int = True
@classmethod
def __lowercase ( cls : List[Any] ,_a : PretrainedConfig ,_a : PretrainedConfig ,**_a : Dict ):
'''simple docstring'''
logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
_a : Optional[int] = True
_a : str = True
return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[int] = copy.deepcopy(self.__dict__ )
_a : Tuple = self.encoder.to_dict()
_a : Optional[Any] = self.decoder.to_dict()
_a : str = self.__class__.model_type
return output
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = version.parse('''1.11''' )
@property
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowercase ( self : List[str] ):
'''simple docstring'''
return 1E-4
@property
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} )
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
@property
def __lowercase ( self : Any ):
'''simple docstring'''
_a : str = OrderedDict()
_a : Optional[Any] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
_a : Dict = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
_a : Any = {0: 'batch', 1: 'encoder_sequence'}
return common_inputs
def __lowercase ( self : Any ,_a : "PreTrainedTokenizerBase" ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,):
'''simple docstring'''
import torch
_a : List[str] = OrderedDict()
_a : Optional[Any] = super().generate_dummy_inputs(
_a ,batch_size=_a ,seq_length=_a ,is_pair=_a ,framework=_a )
_a : Optional[int] = dummy_input['input_ids'].shape
_a : Dict = (batch, encoder_sequence, self._config.encoder_hidden_size)
_a : Tuple = dummy_input.pop('input_ids' )
_a : int = dummy_input.pop('attention_mask' )
_a : str = torch.zeros(_a )
return common_inputs
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
pass
def __lowercase ( self : List[str] ,_a : PretrainedConfig ):
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(_a )
def __lowercase ( self : List[str] ,_a : PretrainedConfig ,_a : PretrainedConfig ,_a : str = "default" ):
'''simple docstring'''
_a : Optional[Any] = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_a ,_a )
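# A usage sketch for composing this config (in the released `transformers` API
# the class is `VisionEncoderDecoderConfig` and the classmethod is
# `from_encoder_decoder_configs`; the unmangled names are assumptions here):
#
#     from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig
#
#     config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(
#         ViTConfig(), BertConfig()
#     )  # sets decoder.is_decoder=True and decoder.add_cross_attention=True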
| 367 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
def __lowercase ( self : str ):
'''simple docstring'''
_a : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_a : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_a : List[str] = 'xvjiarui/stable-diffusion-2-inpainting'
_a, _a : str = FlaxStableDiffusionInpaintPipeline.from_pretrained(_a ,safety_checker=_a )
_a : str = 'Face of a yellow cat, high resolution, sitting on a park bench'
_a : int = jax.random.PRNGKey(0 )
_a : Tuple = 50
_a : Any = jax.device_count()
_a : Dict = num_samples * [prompt]
_a : Optional[Any] = num_samples * [init_image]
_a : str = num_samples * [mask_image]
_a, _a, _a : Optional[Any] = pipeline.prepare_inputs(_a ,_a ,_a )
# shard inputs and rng
_a : Optional[Any] = replicate(_a )
_a : str = jax.random.split(_a ,jax.device_count() )
_a : Dict = shard(_a )
_a : int = shard(_a )
_a : int = shard(_a )
_a : Union[str, Any] = pipeline(
_a ,_a ,_a ,_a ,_a ,_a ,jit=_a )
_a : Union[str, Any] = output.images.reshape(_a ,512 ,512 ,3 )
_a : Union[str, Any] = images[0, 253:256, 253:256, -1]
_a : str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_a : Union[str, Any] = jnp.array(
[0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 5 | 0 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase_ (__a : Union[str, Any] , __a : Optional[int] , __a : List[str]=None ):
assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
_a : List[Any] = nn.Parameter(__a )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
_a : Tuple = nn.Parameter(__a )
def UpperCAmelCase_ (__a : str , __a : Optional[int] , __a : Any ):
_a : int = np.asarray(weights[0] )
_a : Optional[Any] = np.asarray(weights[1] )
_a : Tuple = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.output.dense , torch.tensor(__a ).view(-1 , __a ).contiguous().transpose(0 , 1 ) , )
def UpperCAmelCase_ (__a : int , __a : List[Any] , __a : List[str] ):
_a : List[Any] = np.asarray(weights[0] )
_a : Optional[Any] = np.asarray(weights[1] )
_a : List[str] = np.asarray(weights[2] )
_a : List[Any] = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.output.dense , torch.tensor(__a ).view(-1 , __a ).contiguous().transpose(0 , 1 ) , )
def UpperCAmelCase_ (__a : Optional[Any] , __a : str , __a : Union[str, Any] ):
_a : Dict = weights[0][0][0]
_a : Tuple = np.asarray(layer_norm_a[0] )
_a : Dict = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(__a ) , torch.tensor(__a ) , )
# lsh weights + output
_a : List[str] = weights[0][1]
if len(__a ) < 4:
set_layer_weights_in_torch_lsh(__a , torch_block.attention , __a )
else:
set_layer_weights_in_torch_local(__a , torch_block.attention , __a )
    # intermediate weights
_a : str = weights[2][0][1][2]
# Chunked Feed Forward
if len(__a ) == 4:
_a : Optional[Any] = intermediate_weights[2]
# layernorm 2
_a : str = np.asarray(intermediate_weights[0][0] )
_a : List[str] = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(__a ) , torch.tensor(__a ) , )
# intermediate dense
_a : Tuple = np.asarray(intermediate_weights[1][0] )
_a : Dict = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(__a ).transpose(0 , 1 ).contiguous() , torch.tensor(__a ) , )
# intermediate out
_a : Union[str, Any] = np.asarray(intermediate_weights[4][0] )
_a : int = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(__a ).transpose(0 , 1 ).contiguous() , torch.tensor(__a ) , )
def UpperCAmelCase_ (__a : str , __a : List[Any] , __a : List[str] ):
_a : Union[str, Any] = torch_model.reformer
# word embeds
_a : List[str] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(__a ) , )
if isinstance(weights[3] , __a ):
_a : str = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_a : Union[str, Any] = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f"""{position_embeddings[emb_idx]} emb does not match"""
_a : int = nn.Parameter(torch.tensor(__a ) )
_a : Optional[Any] = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
__a ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_a : Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(__a , __a , __a )
# output layer norm
_a : str = np.asarray(weights[7][0] )
_a : Any = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(__a ) , torch.tensor(__a ) , )
# output embeddings
_a : Dict = np.asarray(weights[9][0] )
_a : List[str] = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(__a ).transpose(0 , 1 ).contiguous() , torch.tensor(__a ) , )
def UpperCAmelCase_ (__a : Any , __a : Union[str, Any] , __a : Optional[int] ):
_a : Tuple = ReformerConfig.from_json_file(__a )
print(f"""Building PyTorch model from configuration: {config}""" )
_a : int = ReformerModelWithLMHead(__a )
with open(__a , 'rb' ) as f:
_a : List[str] = pickle.load(__a )['weights']
set_model_weights_in_torch(__a , __a , config.hidden_size )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , __a )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__lowerCAmelCase = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 368 |
'''simple docstring'''
def UpperCAmelCase_ (__a : str , __a : str ):
"""simple docstring"""
_a : int = len(__a ) + 1
_a : List[str] = len(__a ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
_a : Optional[int] = [[0 for i in range(__a )] for j in range(__a )]
# since string of zero length match pattern of zero length
_a : str = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , __a ):
_a : Optional[Any] = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , __a ):
_a : Dict = dp[0][j - 2] if pattern[j - 1] == '*' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , __a ):
for j in range(1 , __a ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
_a : Tuple = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
_a : List[str] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
_a : int = dp[i - 1][j]
else:
_a : Any = 0
else:
_a : Optional[Any] = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
__lowerCAmelCase = """aab"""
__lowerCAmelCase = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
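# The DP above implements full-string matching with "." and "*", so its verdicts
# can be cross-checked against the standard library's regex engine:
import re

assert bool(re.fullmatch("c*a*b", "aab"))     # "aab" does match c*a*b
assert re.fullmatch("c*a*b", "aabc") is None  # a trailing "c" breaks the match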
| 5 | 0 |
'''simple docstring'''
import re
import subprocess
import sys
__lowerCAmelCase = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
__lowerCAmelCase = subprocess.check_output(f'''git diff --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
__lowerCAmelCase = """|""".join(sys.argv[1:])
__lowerCAmelCase = re.compile(rf'''^({joined_dirs}).*?\.py$''')
__lowerCAmelCase = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 369 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = BlenderbotSmallTokenizer
__UpperCAmelCase : Tuple = False
def __lowercase ( self : List[Any] ):
'''simple docstring'''
super().setUp()
_a : List[str] = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
_a : Tuple = dict(zip(_a ,range(len(_a ) ) ) )
_a : List[Any] = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
_a : List[Any] = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
_a : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
_a : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(_a ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(_a ) )
def __lowercase ( self : List[Any] ,**_a : int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**_a )
def __lowercase ( self : Tuple ,_a : int ):
'''simple docstring'''
_a : Optional[Any] = 'adapt act apte'
_a : Dict = 'adapt act apte'
return input_text, output_text
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[int] = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
_a : Union[str, Any] = 'adapt act apte'
_a : Dict = ['adapt', 'act', 'ap@@', 'te']
_a : Tuple = tokenizer.tokenize(_a )
self.assertListEqual(_a ,_a )
_a : List[str] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_a : Dict = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) ,_a )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : str = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [1384]
_a : Union[str, Any] = 'I am a small frog.'
_a : int = tok([src_text] ,padding=_a ,truncation=_a )['input_ids']
_a : str = tok.batch_decode(_a ,skip_special_tokens=_a ,clean_up_tokenization_spaces=_a )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
_a : Union[str, Any] = 'I am a small frog .'
_a : Optional[Any] = '.'
_a : Optional[Any] = tok(_a )['input_ids']
_a : Union[str, Any] = tok(_a )['input_ids']
assert encoded[-1] == encoded_dot[0]
| 5 | 0 |
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def UpperCAmelCase_ (__a : Dict , __a : List[Any] , __a : Tuple , __a : Tuple ):
"""simple docstring"""
    _a : int = sorted(zip(__a , __a ) , key=lambda x : x[0] / x[1] , reverse=__a )
_a : str = [i[0] for i in r], [i[1] for i in r]
_a : str = list(accumulate(__a ) )
_a : Optional[int] = bisect(__a , __a )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
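# A minimal, self-contained version of the greedy fractional-knapsack routine
# above with readable names; the parameters mirror the use sites in the mangled
# body (values `vl`, weights `wt`, capacity `w`, item count `n`):
from bisect import bisect
from itertools import accumulate

def frac_knapsack(vl: list[float], wt: list[float], w: float, n: int) -> float:
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)  # number of items that fit whole
    if k == 0:
        return 0
    if k == n:
        return sum(vl)
    # take k whole items plus the fraction of item k that still fits
    return sum(vl[:k]) + (w - acc[k - 1]) * vl[k] / wt[k]

# Two whole items (values 60, 100) plus 2/3 of the third (value 120) -> 240.
assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0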
| 370 |
'''simple docstring'''
# fmt: off
__lowerCAmelCase = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
__lowerCAmelCase = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : List[str] = 'Morse code here!'
print(__a )
_a : Tuple = encrypt(__a )
print(__a )
_a : str = decrypt(__a )
print(__a )
if __name__ == "__main__":
main()
| 5 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
__lowerCAmelCase = logging.getLogger(__name__)
torch.set_grad_enabled(False)
__lowerCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
def UpperCAmelCase_ (__a : str , __a : int=1_0_0 , __a : Tuple=" " ):
"""simple docstring"""
_a : int = text.split(__a )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__a ) , __a )]
def split_documents (documents : dict ):
    """simple docstring"""
    titles, texts = [], []
    for title, text in zip(documents['title'] , documents['text'] ):
        if text is not None:
            for passage in split_text(text ):
                titles.append(title if title is not None else '' )
                texts.append(passage )
    return {"title": titles, "text": texts}
def embed (documents : dict , ctx_encoder : DPRContextEncoder , ctx_tokenizer : DPRContextEncoderTokenizerFast ):
    """simple docstring"""
    input_ids = ctx_tokenizer(
        documents['title'] , documents['text'] , truncation=True , padding='longest' , return_tensors='pt' )['input_ids']
    embeddings = ctx_encoder(input_ids.to(device=device ) , return_dict=True ).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def UpperCAmelCase_ (__a : "RagExampleArguments" , __a : "ProcessingArguments" , __a : "IndexHnswArguments" , ):
"""simple docstring"""
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents , batched=True , num_proc=processing_args.num_proc )
    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=device )
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
    new_features = Features(
        {'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed , ctx_encoder=ctx_encoder , ctx_tokenizer=ctx_tokenizer ) , batched=True , batch_size=processing_args.batch_size , features=new_features , )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' )
    dataset.save_to_disk(passages_path )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index('embeddings' , custom_index=index )
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' )
    dataset.get_index('embeddings' ).save(index_path )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments :
    """simple docstring"""
    csv_path : str = field(
        default=str(Path(__file__ ).parent / '''test_run''' / '''dummy-kb''' / '''my_knowledge_dataset.csv''' ) , metadata={'''help''': '''Path to a tab-separated csv file with columns \'title\' and \'text\''''} , )
    question : Optional[str] = field(
        default='''What does Moses' rod turn into ?''' , metadata={'''help''': '''Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'''} , )
    rag_model_name : str = field(
        default='''facebook/rag-sequence-nq''' , metadata={'''help''': '''The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''''} , )
    dpr_ctx_encoder_model_name : str = field(
        default='''facebook/dpr-ctx_encoder-multiset-base''' , metadata={
            '''help''': (
                '''The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'''
                ''' \'facebook/dpr-ctx_encoder-multiset-base\''''
            )
        } , )
    output_dir : Optional[str] = field(
        default=str(Path(__file__ ).parent / '''test_run''' / '''dummy-kb''' ) , metadata={'''help''': '''Path to a directory where the dataset passages and the index will be saved'''} , )
@dataclass
class ProcessingArguments :
    """simple docstring"""
    num_proc : Optional[int] = field(
        default=None , metadata={
            '''help''': '''The number of processes to use to split the documents into passages. Default is single process.'''
        } , )
    batch_size : int = field(
        default=16 , metadata={
            '''help''': '''The batch size to use when computing the passages embeddings using the DPR context encoder.'''
        } , )
@dataclass
class IndexHnswArguments :
    """simple docstring"""
    d : int = field(
        default=768 , metadata={'''help''': '''The dimension of the embeddings to pass to the HNSW Faiss index.'''} , )
    m : int = field(
        default=128 , metadata={
            '''help''': (
                '''The number of bi-directional links created for every new element during the HNSW index construction.'''
            )
        } , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
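# A hedged follow-up sketch (not part of the original script): the saved passages and
# index can be reloaded and queried with the datasets API; `question_embedding` below
# is an assumed DPR question vector of dimension `index_hnsw_args.d`.
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path)
# dataset.load_faiss_index("embeddings", index_path)
# scores, examples = dataset.get_nearest_examples("embeddings", question_embedding, k=5)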
| 371 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    @classmethod
    def setUpClass ( cls : Optional[Any] ):
'''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
    @classmethod
    def tearDownClass ( cls : List[Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-config' )
except HTTPError:
pass
def __lowercase ( self : List[str] ):
'''simple docstring'''
        config = BertConfig(
            vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
        config.push_to_hub('test-config' ,use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(F"""{USER}/test-config""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v ,getattr(new_config ,k ) )
        # Reset repo
        delete_repo(token=self._token ,repo_id='test-config' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir ,repo_id='test-config' ,push_to_hub=True ,use_auth_token=self._token )
            new_config = BertConfig.from_pretrained(F"""{USER}/test-config""" )
            for k, v in config.to_dict().items():
                if k != "transformers_version":
                    self.assertEqual(v ,getattr(new_config ,k ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
        config = BertConfig(
            vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
        config.push_to_hub('valid_org/test-config-org' ,use_auth_token=self._token )
        new_config = BertConfig.from_pretrained('valid_org/test-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v ,getattr(new_config ,k ) )
        # Reset repo
        delete_repo(token=self._token ,repo_id='valid_org/test-config-org' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir ,repo_id='valid_org/test-config-org' ,push_to_hub=True ,use_auth_token=self._token )
            new_config = BertConfig.from_pretrained('valid_org/test-config-org' )
            for k, v in config.to_dict().items():
                if k != "transformers_version":
                    self.assertEqual(v ,getattr(new_config ,k ) )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42 )
        config.push_to_hub('test-dynamic-config' ,use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map ,{'AutoConfig': 'custom_configuration.CustomConfig'} )
        new_config = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" ,trust_remote_code=True )
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__ ,'CustomConfig' )
        self.assertEqual(new_config.attribute ,42 )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + 'foo'  # str
        c.update_from_string(
            F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
        self.assertEqual(n_embd ,c.n_embd ,'mismatch for key: n_embd' )
        self.assertEqual(resid_pdrop ,c.resid_pdrop ,'mismatch for key: resid_pdrop' )
        self.assertEqual(scale_attn_weights ,c.scale_attn_weights ,'mismatch for key: scale_attn_weights' )
        self.assertEqual(summary_type ,c.summary_type ,'mismatch for key: summary_type' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys ,['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config ,key )]
        if len(keys_with_defaults ) > 0:
            raise ValueError(
                'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs` pick another value for them:'
                F""" {', '.join(keys_with_defaults )}.""" )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
        with self.assertRaises(OSError ):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ,subfolder='bert' )
        self.assertIsNotNone(config )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request' ,return_value=response_mock ) as mock_head:
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # This checks that we did call the fake head request
        mock_head.assert_called()
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
        configuration = AutoConfig.from_pretrained('bert-base-cased' )
        configuration.configuration_files = ['config.4.0.0.json']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir )
            configuration.hidden_size = 2
            json.dump(configuration.to_dict() ,open(os.path.join(tmp_dir ,'config.4.0.0.json' ) ,'w' ) )
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size ,2 )
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['config.42.0.0.json']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir )
            shutil.move(os.path.join(tmp_dir ,'config.4.0.0.json' ) ,os.path.join(tmp_dir ,'config.42.0.0.json' ) )
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size ,768 )
def __lowercase ( self : str ):
'''simple docstring'''
        repo = 'hf-internal-testing/test-two-configs'
        import transformers as new_transformers
        new_transformers.configuration_utils.__version__ = 'v4.0.0'
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo ,return_unused_kwargs=True )
        self.assertEqual(new_configuration.hidden_size ,2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs ,{} )
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers
        old_transformers.configuration_utils.__version__ = 'v3.0.0'
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo )
        self.assertEqual(old_configuration.hidden_size ,768 )
| 5 | 0 |
'''simple docstring'''
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)
BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    """bertabs-finetuned-cnndm""": """https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json""",
}
class BertAbsConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type : str = '''bertabs'''
    def __init__( self : Union[str, Any] ,vocab_size : int=3_0522 ,max_pos : int=512 ,enc_layers : int=6 ,enc_hidden_size : int=512 ,enc_heads : int=8 ,enc_ff_size : int=512 ,enc_dropout : float=0.2 ,dec_layers : int=6 ,dec_hidden_size : int=768 ,dec_heads : int=8 ,dec_ff_size : int=2048 ,dec_dropout : float=0.2 ,**kwargs : Any ,):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
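# A minimal usage sketch (defaults as defined above):
# config = BertAbsConfig()
# print(config.enc_hidden_size, config.dec_hidden_size)  # 512 768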
| 350 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"""169M""": 1_2,
"""430M""": 2_4,
"""1B5""": 2_4,
"""3B""": 3_2,
"""7B""": 3_2,
"""14B""": 4_0,
}
HIDDEN_SIZE_MAPPING = {
"""169M""": 7_6_8,
"""430M""": 1_0_2_4,
"""1B5""": 2_0_4_8,
"""3B""": 2_5_6_0,
"""7B""": 4_0_9_6,
"""14B""": 5_1_2_0,
}
def convert_state_dict (state_dict : Dict ):
    """simple docstring"""
    state_dict_keys = list(state_dict.keys() )
    for name in state_dict_keys:
        weight = state_dict.pop(name )
        # emb -> embedding
        if name.startswith('emb.' ):
            name = name.replace('emb.' , 'embeddings.' )
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('blocks.0.ln0' ):
            name = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
        # att -> attention
        name = re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , name )
        # ffn -> feed_forward
        name = re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , name )
        # time_mix_k -> time_mix_key and reshape
        if name.endswith('.time_mix_k' ):
            name = name.replace('.time_mix_k' , '.time_mix_key' )
        # time_mix_v -> time_mix_value and reshape
        if name.endswith('.time_mix_v' ):
            name = name.replace('.time_mix_v' , '.time_mix_value' )
        # time_mix_r -> time_mix_receptance and reshape
        if name.endswith('.time_mix_r' ):
            name = name.replace('.time_mix_r' , '.time_mix_receptance' )
        if name != "head.weight":
            name = 'rwkv.' + name
        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format (repo_id : str , checkpoint_file : str , output_dir : str , size : str=None , tokenizer_file : str=None , push_to_hub : bool=False , model_name : str=None ):
    """simple docstring"""
    if tokenizer_file is None:
        print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
        vocab_size = 5_0_2_7_7
        tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file )
        vocab_size = len(tokenizer )
    tokenizer.save_pretrained(output_dir )
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
    if size not in possible_sizes:
        raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
    config = RwkvConfig(
        vocab_size=vocab_size , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDDEN_SIZE_MAPPING[size] , )
    config.save_pretrained(output_dir )
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id , checkpoint_file )
    state_dict = torch.load(model_file , map_location='cpu' )
    state_dict = convert_state_dict(state_dict )
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict )
    for shard_file, shard in shards.items():
        torch.save(shard , os.path.join(output_dir , shard_file ) )
    if index is not None:
        index_path = os.path.join(output_dir , WEIGHTS_INDEX_NAME )
        # Save the index as well
        with open(index_path , 'w' , encoding='utf-8' ) as f:
            content = json.dumps(index , indent=2 , sort_keys=True ) + '\n'
            f.write(content )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        'Cleaning up shards. This may error with an OOM error, if this is the case don\'t worry you still have converted the model.' )
    shard_files = list(shards.keys() )
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir , shard_file ) )
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(output_dir , shard_file ) )
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
        model = AutoModelForCausalLM.from_pretrained(output_dir )
        model.push_to_hub(model_name , max_shard_size='2GB' )
        tokenizer.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
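# Hedged CLI sketch (flag names from the parser above; placeholder values, script
# filename assumed):
# python convert_rwkv_checkpoint_to_hf.py --repo_id <hub-repo-with-checkpoint> \
#     --checkpoint_file <checkpoint.pth> --output_dir ./rwkv-converted --size 169M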
| 5 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image :
        """simple docstring"""
        @staticmethod
        def open ( *args : Optional[int] ,**kwargs : Optional[int] ):
            '''simple docstring'''
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    model_mapping : Any = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def __lowercase ( self : Dict ,model : int ,tokenizer : Tuple ,processor : Dict ):
        '''simple docstring'''
        object_detector = ObjectDetectionPipeline(model=model ,image_processor=processor )
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def __lowercase ( self : Union[str, Any] ,object_detector : List[Any] ,examples : List[Any] ):
        '''simple docstring'''
        outputs = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png' ,threshold=0.0 )
        self.assertGreater(len(outputs ) ,0 )
        for detected_object in outputs:
            self.assertEqual(
                detected_object ,{
                    'score': ANY(float ),
                    'label': ANY(str ),
                    'box': {'xmin': ANY(int ), 'ymin': ANY(int ), 'xmax': ANY(int ), 'ymax': ANY(int )},
                } ,)
        import datasets
        dataset = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' ,'image' ,split='test' )
        batch = [
            Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
            'http://images.cocodataset.org/val2017/000000039769.jpg',
            # RGBA
            dataset[0]['file'],
            # LA
            dataset[1]['file'],
            # L
            dataset[2]['file'],
        ]
        batch_outputs = object_detector(batch ,threshold=0.0 )
        self.assertEqual(len(batch ) ,len(batch_outputs ) )
        for outputs in batch_outputs:
            self.assertGreater(len(outputs ) ,0 )
            for detected_object in outputs:
                self.assertEqual(
                    detected_object ,{
                        'score': ANY(float ),
                        'label': ANY(str ),
                        'box': {'xmin': ANY(int ), 'ymin': ANY(int ), 'xmax': ANY(int ), 'ymax': ANY(int )},
                    } ,)
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def __lowercase ( self : int ):
'''simple docstring'''
pass
@require_torch
def __lowercase ( self : Any ):
'''simple docstring'''
        model_id = 'hf-internal-testing/tiny-detr-mobilenetsv3'
        model = AutoModelForObjectDetection.from_pretrained(model_id )
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id )
        object_detector = ObjectDetectionPipeline(model=model ,feature_extractor=feature_extractor )
        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ,threshold=0.0 )
self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
] ,)
        outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] ,threshold=0.0 ,)
self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
] ,)
@require_torch
@slow
def __lowercase ( self : Any ):
'''simple docstring'''
        model_id = 'facebook/detr-resnet-50'
        model = AutoModelForObjectDetection.from_pretrained(model_id )
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id )
        object_detector = ObjectDetectionPipeline(model=model ,feature_extractor=feature_extractor )
        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] ,)
        outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] ,)
@require_torch
@slow
def __lowercase ( self : Tuple ):
'''simple docstring'''
        model_id = 'facebook/detr-resnet-50'
        object_detector = pipeline('object-detection' ,model=model_id )
        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] ,)
        outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] ,)
@require_torch
@slow
def __lowercase ( self : Tuple ):
'''simple docstring'''
        threshold = 0.9985
        model_id = 'facebook/detr-resnet-50'
        object_detector = pipeline('object-detection' ,model=model_id )
        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ,threshold=threshold )
self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] ,)
@require_torch
@require_pytesseract
@slow
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
        model_id = 'Narsil/layoutlmv3-finetuned-funsd'
        threshold = 0.9993
        object_detector = pipeline('object-detection' ,model=model_id ,threshold=threshold )
        outputs = object_detector(
            'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
] ,)
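# Minimal standalone sketch of the pipeline exercised above (model name taken from the tests):
# from transformers import pipeline
# detector = pipeline('object-detection', model='facebook/detr-resnet-50')
# detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=0.9)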
| 351 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
__lowerCAmelCase = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
__lowerCAmelCase = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
    `mean_score`: Mean of the segment-level scores.
    `scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
    def _info ( self : Optional[int] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://unbabel.github.io/COMET/html/index.html' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'sources': datasets.Value('string' ,id='sequence' ),
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/Unbabel/COMET'] ,reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6',
] ,)
    def _download_and_prepare ( self : int ,dl_manager : int ):
        '''simple docstring'''
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
    def _compute ( self : Tuple ,sources : List[Any] ,predictions : Dict ,references : Optional[Any] ,gpus : List[str]=None ,progress_bar : Tuple=False ):
        '''simple docstring'''
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'src': sources, 'mt': predictions, 'ref': references}
        data = [dict(zip(data ,t ) ) for t in zip(*data.values() )]
        scores, mean_score = self.scorer.predict(data ,gpus=gpus ,progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
| 5 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime (number : int ):
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def compute_nums (n : int ):
    """simple docstring"""
    if not isinstance(n , int ):
        raise ValueError('n must be an integer' )
    if n <= 0:
        raise ValueError('n must be >= 0' )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
            if len(list_nums ) == n:
                return list_nums
    return []
def solution ():
    """simple docstring"""
    return compute_nums(1 )[0]
if __name__ == "__main__":
print(f'''{solution() = }''')
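# The smallest odd composite that is not a prime plus twice a square is 5777
# (Project Euler 46), so running this module should print: solution() = 5777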
| 352 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 | 0 |
'''simple docstring'''
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort (graph : dict[int, list[int]] , vert : int , visited : list[bool] ):
    """simple docstring"""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph , neighbour , visited )
    order.append(vert )
    return order
def find_components (reversed_graph : dict[int, list[int]] , vert : int , visited : list[bool] ):
    """simple docstring"""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph , neighbour , visited )
    return component
def strongly_connected_components (graph : dict[int, list[int]] ):
    """simple docstring"""
    visited = len(graph ) * [False]
    reversed_graph : dict[int, list[int]] = {vert: [] for vert in range(len(graph ) )}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert )
    order = []
    for i, was_visited in enumerate(visited ):
        if not was_visited:
            order += topology_sort(graph , i , visited )
    components_list = []
    visited = len(graph ) * [False]
    for i in range(len(graph ) ):
        vert = order[len(graph ) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph , vert , visited )
            components_list.append(component )
    return components_list
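# Expected results for the sample graphs above (component order follows the DFS order):
# strongly_connected_components(test_graph_1)  # -> [[0, 1, 2], [3], [4]]
# strongly_connected_components(test_graph_2)  # -> [[0, 2, 1], [3, 5, 4]]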
| 353 |
'''simple docstring'''
import sys
def matrix_chain_order (array : list ):
    """simple docstring"""
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optimal_solution (optimal_solution : list , i : int , j : int ):
    """simple docstring"""
    if i == j:
        print('A' + str(i ) , end=' ' )
    else:
        print('(' , end=' ' )
        print_optimal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optimal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(')' , end=' ' )
def main ():
    """simple docstring"""
    array = [3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array )
    print('No. of Operations required: ' + str(matrix[1][n - 1] ) )
    print_optimal_solution(optimal_solution , 1 , n - 1 )
if __name__ == "__main__":
main()
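# Expected output for the classic CLRS example above:
# No. of Operations required: 15125
# ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) )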
| 5 | 0 |
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
#   - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def get_dataloaders (accelerator : Accelerator , batch_size : int = 1_6 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    datasets = load_dataset('glue' , 'mrpc' )
    def tokenize_function (examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn (examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 1_6
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='longest' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='pt' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size , drop_last=True )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE , drop_last=(accelerator.mixed_precision == 'fp8') , )
    return train_dataloader, eval_dataloader
def training_function (config : Dict , args ):
    """simple docstring"""
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    metric = evaluate.load('glue' , 'mrpc' )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=1_0_0 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""" , eval_metric )
def main ():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.' , )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6}
    training_function(config , args )
if __name__ == "__main__":
main()
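# Hedged launch sketch (script filename assumed; `accelerate launch` is the standard entry point):
# accelerate launch nlp_example.py --mixed_precision fp16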
| 354 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock_timeout (tmpdir ):
    """simple docstring"""
    lock1 = FileLock(str(tmpdir / 'foo.lock' ) )
    lock2 = FileLock(str(tmpdir / 'foo.lock' ) )
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lock2.acquire(timeout )
        assert time.time() - _start > timeout
def test_filelock_long_filename (tmpdir ):
    """simple docstring"""
    filename = 'a' * 1_0_0_0 + '.lock'
    lock1 = FileLock(str(tmpdir / filename ) )
    assert lock1._lock_file.endswith('.lock' )
    assert not lock1._lock_file.endswith(filename )
    assert len(os.path.basename(lock1._lock_file ) ) <= 2_5_5
    lock2 = FileLock(tmpdir / filename )
    with lock1.acquire():
        with pytest.raises(Timeout ):
            lock2.acquire(0 )
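# Note: FileLock.acquire(timeout) raises Timeout once another lock on the same file
# has been held past `timeout` seconds, which is exactly what both tests above exercise.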
| 5 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer ( Trainer ):
    """simple docstring"""
    def __init__( self : int ,config=None ,data_args=None ,*args : Optional[int] ,**kwargs : List[str] ):
        '''simple docstring'''
        super().__init__(*args ,**kwargs )
        if config is None:
            assert isinstance(self.model ,PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                F""" {self.model.__class__}"""
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config ,FSMTConfig ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
                ' padding..' )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
    def __lowercase ( self : Dict ,num_training_steps : int ):
        '''simple docstring'''
        if self.optimizer is None:
            no_decay = ['bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {
                    'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    'weight_decay': self.args.weight_decay,
                },
                {
                    'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    'weight_decay': 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {'scale_parameter': False, 'relative_step': False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    'betas': (self.args.adam_beta1, self.args.adam_beta2),
                    'eps': self.args.adam_epsilon,
                }
            optimizer_kwargs['lr'] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters ,optim=optimizer_cls ,**optimizer_kwargs ,)
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters ,**optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
    def _get_lr_scheduler ( self : Union[str, Any] ,num_training_steps : Dict ):
        '''simple docstring'''
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=num_training_steps )
        return scheduler
def __lowercase ( self : List[str] ):
'''simple docstring'''
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss ( self : List[str] ,model : Tuple ,inputs : Tuple ,labels : int ):
        '''simple docstring'''
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs ,use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 ,logits.shape[-1] ) ,labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss, logits = model(**inputs ,labels=labels ,use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs ,use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits ,dim=-1 )
            loss, _ = self.loss_fn(lprobs ,labels ,self.args.label_smoothing ,ignore_index=self.config.pad_token_id )
        return loss, logits
    def compute_loss ( self : Union[str, Any] ,model : int ,inputs : Any ):
        '''simple docstring'''
        labels = inputs.pop('labels' )
        loss, _ = self._compute_loss(model ,inputs ,labels )
        return loss
    def prediction_step ( self : List[str] ,model : nn.Module ,inputs : Dict[str, Union[torch.Tensor, Any]] ,prediction_loss_only : bool ,ignore_keys : Optional[List[str]] = None ,):
        '''simple docstring'''
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            'max_length': self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs['input_ids'] ,attention_mask=inputs['attention_mask'] ,**gen_kwargs ,)
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens ,gen_kwargs['max_length'] )
        labels = inputs.pop('labels' )
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model ,inputs ,labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels ,gen_kwargs['max_length'] )
        return (loss, logits, labels)
    def _pad_tensors_to_max_len ( self : List[str] ,tensor : Optional[int] ,max_length : Tuple ):
        '''simple docstring'''
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
                F""" padded to `max_length`={max_length}""" )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) ,dtype=tensor.dtype ,device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 355 |
'''simple docstring'''
def solution (min_total : int = 1_0**1_2 ):
    """simple docstring"""
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
if __name__ == "__main__":
print(f'''{solution() = }''')
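# The loop above iterates the Pell-style recurrence behind Project Euler 100;
# for min_total = 10**12 the expected output is solution() = 756872327473.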
| 5 | 0 |
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm (number : int ):
    """simple docstring"""
    if number < 0:
        raise ValueError('the value of input must not be negative' )
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator (number : int ):
    """simple docstring"""
    if number < 0:
        raise ValueError('the value of input must not be negative' )
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark ():
    """simple docstring"""
    def do_benchmark(number : int ) -> None:
        setup = 'import __main__ as z'
        print(f"""Benchmark when {number = }:""" )
        print(f"""{get_set_bits_count_using_modulo_operator(number ) = }""" )
        timing = timeit('z.get_set_bits_count_using_modulo_operator(25)' , setup=setup )
        print(f"""timeit() runs in {timing} seconds""" )
        print(f"""{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }""" )
        timing = timeit(
            'z.get_set_bits_count_using_brian_kernighans_algorithm(25)' , setup=setup , )
        print(f"""timeit() runs in {timing} seconds""" )
    for number in (2_5, 3_7, 5_8, 0):
        do_benchmark(number )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 356 |
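Both counters above compute the population count; Kernighan's version clears one set bit per iteration, so it loops popcount(n) times rather than bit-length(n) times. A compact sketch with a built-in cross-check:

def popcount(n: int) -> int:
    # n & (n - 1) clears the lowest set bit, so the loop body runs once
    # per set bit.
    if n < 0:
        raise ValueError("input must be non-negative")
    count = 0
    while n:
        n &= n - 1
        count += 1
    return count

assert popcount(25) == bin(25).count("1") == 3  # 25 = 0b11001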
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCAmelCase = {
"""vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""},
"""tokenizer_file""": {
"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"""
},
}
__lowerCAmelCase = {"""mobilebert-uncased""": 5_1_2}
__lowerCAmelCase = {}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Any = VOCAB_FILES_NAMES
__UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Tuple = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Optional[Any] = MobileBertTokenizer
def __init__( self : Dict ,_a : List[Any]=None ,_a : Optional[Any]=None ,_a : Union[str, Any]=True ,_a : Dict="[UNK]" ,_a : Union[str, Any]="[SEP]" ,_a : Any="[PAD]" ,_a : Optional[int]="[CLS]" ,_a : Optional[Any]="[MASK]" ,_a : Dict=True ,_a : Any=None ,**_a : Optional[Any] ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
_a : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,_a ) != do_lower_case
or normalizer_state.get('strip_accents' ,_a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,_a ) != tokenize_chinese_chars
):
_a : Optional[Any] = getattr(_a ,normalizer_state.pop('type' ) )
_a : Dict = do_lower_case
_a : str = strip_accents
_a : Tuple = tokenize_chinese_chars
_a : Optional[Any] = normalizer_class(**_a )
_a : str = do_lower_case
def __lowercase ( self : Tuple ,_a : Union[str, Any] ,_a : List[str]=None ):
'''simple docstring'''
_a : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowercase ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
_a : List[str] = [self.sep_token_id]
_a : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowercase ( self : Union[str, Any] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
_a : int = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
| 5 | 0 |
'''simple docstring'''
def UpperCAmelCase_ (__a : str , __a : int ):
"""simple docstring"""
_a : list[list[str]] = [[] for _ in range(__a )]
_a : Optional[int] = key - 1
if key <= 0:
raise ValueError('Height of grid can\'t be 0 or negative' )
if key == 1 or len(__a ) <= key:
return input_string
for position, character in enumerate(__a ):
_a : int = position % (lowest * 2) # puts it in bounds
_a : Tuple = min(__a , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(__a )
_a : List[Any] = [''.join(__a ) for row in temp_grid]
_a : Optional[int] = ''.join(__a )
return output_string
def UpperCAmelCase_ (__a : str , __a : int ):
"""simple docstring"""
_a : Optional[Any] = []
_a : List[str] = key - 1
if key <= 0:
raise ValueError('Height of grid can\'t be 0 or negative' )
if key == 1:
return input_string
_a : list[list[str]] = [[] for _ in range(__a )] # generates template
for position in range(len(__a ) ):
_a : Optional[int] = position % (lowest * 2) # puts it in bounds
_a : List[str] = min(__a , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append('*' )
_a : Any = 0
for row in temp_grid: # fills in the characters
_a : str = input_string[counter : counter + len(__a )]
grid.append(list(__a ) )
counter += len(__a )
_a : int = '' # reads as zigzag
for position in range(len(__a ) ):
_a : str = position % (lowest * 2) # puts it in bounds
_a : Union[str, Any] = min(__a , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Optional[int] = {}
for key_guess in range(1 , len(__a ) ): # tries every key
_a : Optional[Any] = decrypt(__a , __a )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357 |
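The encryptor above materialises the rail-fence zig-zag as per-rail buckets. The same walk can be written with a single modular index; a short sketch (function name mine):

def rail_fence_encrypt(text: str, key: int) -> str:
    # Characters travel down and back up across `key` rails; one full
    # zig-zag cycle has length 2 * (key - 1).
    if key <= 1 or key >= len(text):
        return text
    rails: list[list[str]] = [[] for _ in range(key)]
    cycle = 2 * (key - 1)
    for i, ch in enumerate(text):
        pos = i % cycle
        rails[min(pos, cycle - pos)].append(ch)
    return "".join("".join(rail) for rail in rails)

print(rail_fence_encrypt("WEAREDISCOVERED", 3))  # WECRERDSOEEAIVD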
'''simple docstring'''
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : List[Any] = 0
# if input_string is "aba" than new_input_string become "a|b|a"
_a : Optional[int] = ''
_a : List[str] = ''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(__a ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
_a, _a : Optional[int] = 0, 0
# length[i] shows the length of palindromic substring with center i
_a : Optional[Any] = [1 for i in range(len(__a ) )]
# for each character in new_string find corresponding palindromic string
_a : Dict = 0
for j in range(len(__a ) ):
_a : Dict = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(__a )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_a : Optional[int] = 2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
_a : str = j - k + 1 # noqa: E741
_a : Any = j + k - 1
# update max_length and start position
if max_length < length[j]:
_a : Union[str, Any] = length[j]
_a : List[str] = j
# create that string
_a : Tuple = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 | 0 |
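That block implements Manacher's algorithm: interleave separators so every palindrome has odd length, then reuse mirror radii so the expansion work stays linear. A compact sketch of the same idea:

def longest_palindrome(s: str) -> str:
    # p[i] is the palindromic radius centred at position i of the
    # separator-interleaved string t; total work is O(len(s)).
    t = "|".join(s)
    p = [0] * len(t)
    center = right = 0
    best_len = best_center = 0
    for i in range(len(t)):
        if i < right:
            p[i] = min(right - i, p[2 * center - i])  # mirror of i about center
        while i - p[i] - 1 >= 0 and i + p[i] + 1 < len(t) and t[i - p[i] - 1] == t[i + p[i] + 1]:
            p[i] += 1
        if i + p[i] > right:
            center, right = i, i + p[i]
        if p[i] > best_len:
            best_len, best_center = p[i], i
    return t[best_center - best_len : best_center + best_len + 1].replace("|", "")

print(longest_palindrome("banana"))  # anana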
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def UpperCAmelCase_ (__a : str = "AAPL" ):
"""simple docstring"""
_a : Tuple = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
_a : List[Any] = BeautifulSoup(requests.get(__a ).text , 'html.parser' )
_a : Any = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_ ).find('span' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 358 |
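The scraper above hard-codes Yahoo Finance's obfuscated CSS class names, which rotate frequently, so the lookup breaks whenever the markup changes (note the snippet imports bs4 under the alias `bsa`). A more defensive sketch of the same fetch-and-find pattern, with a helper name of my own:

from typing import Optional

import requests
from bs4 import BeautifulSoup

def scrape_text(url: str, tag: str, class_: str) -> Optional[str]:
    # Fetch one page and pull the text of the first matching element;
    # selectors for live sites go stale, so a missing node returns None
    # instead of raising AttributeError.
    resp = requests.get(url, timeout=10)
    resp.raise_for_status()
    node = BeautifulSoup(resp.text, "html.parser").find(tag, class_=class_)
    return node.get_text(strip=True) if node is not None else None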
'''simple docstring'''
from functools import lru_cache
@lru_cache
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if num < 0:
raise ValueError('Number should not be negative.' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 | 0 |
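`lru_cache` turns the recursive factorial into a memoised one: every intermediate value computed on the way down is cached, so later calls reuse the whole subtree. A quick demonstration:

from functools import lru_cache

@lru_cache(maxsize=None)
def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("n must be non-negative")
    return 1 if n < 2 else n * factorial(n - 1)

factorial(10)                  # fills the cache for 1..10
factorial(12)                  # only 12 and 11 are new work
print(factorial.cache_info())  # CacheInfo(hits=1, misses=12, ...) on a fresh run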
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] ,_a : int = 16 ,_a : int = 88 ,_a : Optional[int] = None ,_a : int = 1 ,_a : float = 0.0 ,_a : int = 32 ,_a : Optional[int] = None ,_a : bool = False ,_a : Optional[int] = None ,_a : Optional[int] = None ,_a : str = "geglu" ,_a : Optional[int] = None ,):
'''simple docstring'''
super().__init__()
_a : str = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=_a ,attention_head_dim=_a ,in_channels=_a ,num_layers=_a ,dropout=_a ,norm_num_groups=_a ,cross_attention_dim=_a ,attention_bias=_a ,sample_size=_a ,num_vector_embeds=_a ,activation_fn=_a ,num_embeds_ada_norm=_a ,)
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_a : Dict = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_a : Optional[Any] = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_a : List[str] = [1, 0]
def __lowercase ( self : Dict ,_a : str ,_a : int ,_a : Any=None ,_a : List[str]=None ,_a : str=None ,_a : bool = True ,):
'''simple docstring'''
_a : List[Any] = hidden_states
_a : int = []
_a : List[Any] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_a : Optional[int] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_a : Union[str, Any] = self.transformer_index_for_condition[i]
_a : Any = self.transformers[transformer_index](
_a ,encoder_hidden_states=_a ,timestep=_a ,cross_attention_kwargs=_a ,return_dict=_a ,)[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_a : Tuple = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_a : Any = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=_a )
| 359 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
__lowerCAmelCase = threading.Lock()
__lowerCAmelCase = None
__lowerCAmelCase = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
__lowerCAmelCase = logging.WARNING
__lowerCAmelCase = True
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Dict = os.getenv('TRANSFORMERS_VERBOSITY' , __a )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
f"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
return _default_log_level
def UpperCAmelCase_ ():
"""simple docstring"""
return __name__.split('.' )[0]
def UpperCAmelCase_ ():
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def UpperCAmelCase_ ():
"""simple docstring"""
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_a : str = logging.StreamHandler() # Set sys.stderr as stream.
_a : Optional[Any] = sys.stderr.flush
# Apply our default configuration to the library root logger.
_a : List[Any] = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
_a : List[str] = False
def UpperCAmelCase_ ():
"""simple docstring"""
global _default_handler
with _lock:
if not _default_handler:
return
_a : int = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
_a : str = None
def UpperCAmelCase_ ():
"""simple docstring"""
return log_levels
def UpperCAmelCase_ (__a : Optional[str] = None ):
"""simple docstring"""
if name is None:
_a : List[Any] = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
_configure_library_root_logger()
_get_library_root_logger().setLevel(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def UpperCAmelCase_ (__a : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(__a )
def UpperCAmelCase_ (__a : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
_a : Union[str, Any] = False
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
_a : Dict = True
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = _get_library_root_logger().handlers
for handler in handlers:
_a : Union[str, Any] = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
handler.setFormatter(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Union[str, Any] = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(__a )
def UpperCAmelCase_ (self : Union[str, Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ):
"""simple docstring"""
_a : Union[str, Any] = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , __a )
if no_advisory_warnings:
return
self.warning(*__a , **__a )
__lowerCAmelCase = warning_advice
@functools.lru_cache(__a )
def UpperCAmelCase_ (self : int , *__a : Optional[Any] , **__a : Any ):
"""simple docstring"""
self.warning(*__a , **__a )
__lowerCAmelCase = warning_once
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Any ,*_a : Tuple ,**_a : int ): # pylint: disable=unused-argument
'''simple docstring'''
_a : int = args[0] if args else None
def __iter__( self : str ):
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self : List[Any] ,_a : int ):
'''simple docstring'''
def empty_fn(*_a : Optional[Any] ,**_a : Any ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : List[str] ):
'''simple docstring'''
return self
def __exit__( self : List[str] ,_a : str ,_a : List[Any] ,_a : str ):
'''simple docstring'''
return
class UpperCAmelCase__ :
"""simple docstring"""
def __call__( self : Union[str, Any] ,*_a : Tuple ,**_a : Tuple ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm(*_a ,**_a )
else:
return EmptyTqdm(*_a ,**_a )
def __lowercase ( self : str ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
_a : Any = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_a ,**_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__lowerCAmelCase = _tqdm_cls()
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
_a : str = True
hf_hub_utils.enable_progress_bars()
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
_a : Dict = False
hf_hub_utils.disable_progress_bars()
| 5 | 0 |
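The module above centralises logging for a whole library: one lazily-attached handler on the library's root logger, a process-wide lock so configuration happens exactly once, and verbosity setters layered on top. A minimal sketch of that pattern:

import logging
import threading
from typing import Optional

_lock = threading.Lock()
_handler: Optional[logging.Handler] = None

def _get_library_root_logger() -> logging.Logger:
    # "mylib.sub.module" -> the shared "mylib" logger
    return logging.getLogger(__name__.split(".")[0])

def _configure_library_root_logger() -> None:
    # Idempotent: the first caller attaches a stderr handler and a default
    # level; later callers see _handler set and return immediately.
    global _handler
    with _lock:
        if _handler is not None:
            return
        _handler = logging.StreamHandler()
        root = _get_library_root_logger()
        root.addHandler(_handler)
        root.setLevel(logging.WARNING)
        root.propagate = False

def set_verbosity(level: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(level)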
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 360 |
'''simple docstring'''
def UpperCAmelCase_ (__a : list[int] , __a : list[int] ):
"""simple docstring"""
if not len(__a ) == len(__a ) == 3:
raise ValueError('Please enter a valid equation.' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
_a, _a, _a : Tuple = equationa
_a, _a, _a : str = equationa
# Calculate the determinants of the matrices
_a : Union[str, Any] = aa * ba - aa * ba
_a : List[Any] = ca * ba - ca * ba
_a : List[Any] = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
_a : int = determinant_x / determinant
_a : List[str] = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 5 | 0 |
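Cramer's rule for the 2x2 case: substitute the constants into each column of the coefficient matrix and divide the resulting determinants by the coefficient determinant. A readable sketch with a worked call:

def solve_2x2(eq1: tuple[float, float, float], eq2: tuple[float, float, float]) -> tuple[float, float]:
    # Each equation is (a, b, c) for a*x + b*y = c.
    a1, b1, c1 = eq1
    a2, b2, c2 = eq2
    det = a1 * b2 - a2 * b1
    if det == 0:
        raise ValueError("system is singular (no unique solution)")
    det_x = c1 * b2 - c2 * b1  # constants substituted into the x column
    det_y = a1 * c2 - a2 * c1  # constants substituted into the y column
    return det_x / det, det_y / det

print(solve_2x2((2, 3, 8), (1, -1, 1)))  # (2.2, 1.2): 2x+3y=8 and x-y=1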
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCAmelCase = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""ChineseCLIPFeatureExtractor"""]
__lowerCAmelCase = ["""ChineseCLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 361 |
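The `_import_structure` / `_LazyModule` machinery above defers importing heavy submodules until an attribute is actually requested. The same effect can be sketched with a module-level `__getattr__` (PEP 562); package and symbol names below are placeholders, not the library's API:

# mypkg/__init__.py -- minimal lazy-import sketch
import importlib
from typing import Any

_import_structure = {
    "configuration": ["MyConfig"],
    "modeling": ["MyModel"],
}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name: str) -> Any:
    # Import the owning submodule only when its attribute is first touched.
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")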
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : int ,_a : List[str] ,_a : Optional[Any]=13 ,_a : str=30 ,_a : str=2 ,_a : Union[str, Any]=3 ,_a : Optional[Any]=True ,_a : int=True ,_a : Union[str, Any]=32 ,_a : List[Any]=5 ,_a : Union[str, Any]=4 ,_a : int=37 ,_a : Any="gelu" ,_a : Union[str, Any]=0.1 ,_a : str=0.1 ,_a : List[str]=10 ,_a : Dict=0.02 ,_a : Tuple=None ,):
'''simple docstring'''
_a : Any = parent
_a : int = batch_size
_a : List[Any] = image_size
_a : Optional[int] = patch_size
_a : List[str] = num_channels
_a : Dict = is_training
_a : Dict = use_labels
_a : Optional[Any] = hidden_size
_a : str = num_hidden_layers
_a : Optional[int] = num_attention_heads
_a : Dict = intermediate_size
_a : Union[str, Any] = hidden_act
_a : List[str] = hidden_dropout_prob
_a : Any = attention_probs_dropout_prob
_a : List[str] = type_sequence_label_size
_a : int = initializer_range
_a : List[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_a : Union[str, Any] = (image_size // patch_size) ** 2
_a : Tuple = num_patches + 1
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : str = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : List[str] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,)
def __lowercase ( self : Tuple ,_a : Any ,_a : List[Any] ,_a : int ):
'''simple docstring'''
_a : str = ViTMSNModel(config=_a )
model.to(_a )
model.eval()
_a : int = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : List[Any] ,_a : str ,_a : Tuple ,_a : Dict ):
'''simple docstring'''
_a : Tuple = self.type_sequence_label_size
_a : int = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_a : Dict = model(_a ,labels=_a )
print(f'Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
print(f'Labels: {labels}' )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_a : int = 1
_a : Optional[Any] = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_a : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = self.prepare_config_and_inputs()
_a, _a, _a : int = config_and_inputs
_a : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__UpperCAmelCase : List[Any] = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : str = False
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : List[str] = ViTMSNModelTester(self )
_a : Optional[int] = ConfigTester(self ,config_class=_a ,has_text_modality=_a ,hidden_size=37 )
def __lowercase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a, _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[Any] = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_a : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a ,nn.Linear ) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a, _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[str] = model_class(_a )
_a : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : List[Any] = [*signature.parameters.keys()]
_a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self : int ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Dict = ViTMSNModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(2 )
_a : List[str] = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(_a )
_a : List[str] = self.default_image_processor
_a : int = prepare_img()
_a : Tuple = image_processor(images=_a ,return_tensors='pt' ).to(_a )
# forward pass
with torch.no_grad():
_a : Optional[int] = model(**_a )
# verify the logits
_a : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_a )
_a : List[Any] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_a ,atol=1E-4 ) )
| 5 | 0 |
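The tester above derives the expected sequence length as patches plus one [CLS] token. The arithmetic in isolation:

def vit_seq_len(image_size: int, patch_size: int) -> int:
    # A ViT-style encoder flattens the image into (image_size // patch_size)**2
    # non-overlapping patches and prepends a single [CLS] token.
    return (image_size // patch_size) ** 2 + 1

print(vit_seq_len(224, 16))  # 197 = 14 * 14 patches + [CLS]
print(vit_seq_len(30, 2))    # 226, matching the test defaults above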
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__lowerCAmelCase = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def UpperCAmelCase_ (__a : Optional[Any] , __a : Any ):
"""simple docstring"""
inspect_dataset(__a , __a )
_a : List[Any] = path + '.py'
assert script_name in os.listdir(__a )
assert "__pycache__" not in os.listdir(__a )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def UpperCAmelCase_ (__a : Union[str, Any] , __a : List[str] ):
"""simple docstring"""
inspect_metric(__a , __a )
_a : Optional[int] = path + '.py'
assert script_name in os.listdir(__a )
assert "__pycache__" not in os.listdir(__a )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def UpperCAmelCase_ (__a : str , __a : List[str] , __a : List[str] ):
"""simple docstring"""
_a : List[Any] = get_dataset_config_info(__a , config_name=__a )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def UpperCAmelCase_ (__a : Dict , __a : Optional[Any] , __a : Optional[int] ):
"""simple docstring"""
with pytest.raises(__a ):
get_dataset_config_info(__a , config_name=__a )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def UpperCAmelCase_ (__a : int , __a : Optional[Any] ):
"""simple docstring"""
_a : str = get_dataset_config_names(__a )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def UpperCAmelCase_ (__a : Tuple , __a : Optional[int] , __a : Any ):
"""simple docstring"""
_a : List[Any] = get_dataset_infos(__a )
assert list(infos.keys() ) == expected_configs
_a : List[Any] = expected_configs[0]
assert expected_config in infos
_a : Dict = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def UpperCAmelCase_ (__a : Optional[Any] , __a : List[Any] , __a : Optional[int] ):
"""simple docstring"""
_a : Optional[Any] = get_dataset_infos(__a )
assert expected_config in infos
_a : Tuple = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def UpperCAmelCase_ (__a : Union[str, Any] , __a : List[str] , __a : Optional[int] ):
"""simple docstring"""
with pytest.raises(__a ):
get_dataset_split_names(__a , config_name=__a )
| 362 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def UpperCAmelCase_ (__a : str = "https://www.worldometers.info/coronavirus" ):
"""simple docstring"""
_a : List[str] = BeautifulSoup(requests.get(__a ).text , 'html.parser' )
_a : Dict = soup.findAll('h1' )
_a : Union[str, Any] = soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(__a , __a )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 5 | 0 |
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def UpperCAmelCase_ (__a : BertModel , __a : str , __a : str ):
"""simple docstring"""
_a : Optional[Any] = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
_a : str = (
('layer.', 'layer_'),
('word_embeddings.weight', 'word_embeddings'),
('position_embeddings.weight', 'position_embeddings'),
('token_type_embeddings.weight', 'token_type_embeddings'),
('.', '/'),
('LayerNorm/weight', 'LayerNorm/gamma'),
('LayerNorm/bias', 'LayerNorm/beta'),
('weight', 'kernel'),
)
if not os.path.isdir(__a ):
os.makedirs(__a )
_a : List[str] = model.state_dict()
def to_tf_var_name(__a : str ):
for patt, repl in iter(__a ):
_a : Optional[int] = name.replace(__a , __a )
return f"""bert/{name}"""
def create_tf_var(__a : np.ndarray , __a : str , __a : tf.Session ):
_a : Union[str, Any] = tf.dtypes.as_dtype(tensor.dtype )
_a : Union[str, Any] = tf.get_variable(dtype=__a , shape=tensor.shape , name=__a , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(__a )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
_a : str = to_tf_var_name(__a )
_a : Tuple = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
_a : str = torch_tensor.T
_a : str = create_tf_var(tensor=__a , name=__a , session=__a )
tf.keras.backend.set_value(__a , __a )
_a : Optional[Any] = session.run(__a )
print(f"""Successfully created {tf_name}: {np.allclose(__a , __a )}""" )
_a : Optional[int] = tf.train.Saver(tf.trainable_variables() )
saver.save(__a , os.path.join(__a , model_name.replace('-' , '_' ) + '.ckpt' ) )
def UpperCAmelCase_ (__a : Union[str, Any]=None ):
"""simple docstring"""
_a : Dict = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=__a , required=__a , help='model name e.g. bert-base-uncased' )
parser.add_argument(
'--cache_dir' , type=__a , default=__a , required=__a , help='Directory containing pytorch model' )
parser.add_argument('--pytorch_model_path' , type=__a , required=__a , help='/path/to/<pytorch-model-name>.bin' )
parser.add_argument('--tf_cache_dir' , type=__a , required=__a , help='Directory in which to save tensorflow model' )
_a : Dict = parser.parse_args(__a )
_a : int = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=__a , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 363 |
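The transpose list in the converter above exists because `torch.nn.Linear` stores weights as (out_features, in_features) while TF dense kernels are (in_features, out_features). The core mapping in isolation, with NumPy standing in for both frameworks:

import numpy as np

def to_tf_kernel(torch_weight: np.ndarray) -> np.ndarray:
    # Only 2-D weight matrices need the axis swap; biases and embeddings
    # keep their layout.
    return torch_weight.T if torch_weight.ndim == 2 else torch_weight

w = np.zeros((768, 3072))     # a Linear weight in PyTorch layout (out, in)
print(to_tf_kernel(w).shape)  # (3072, 768), TF kernel layout (in, out)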
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
__lowerCAmelCase = """docs/source/en/_toctree.yml"""
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Any = defaultdict(__a )
for doc in model_doc:
counts[doc["local"]] += 1
_a : List[str] = [key for key, value in counts.items() if value > 1]
_a : str = []
for duplicate_key in duplicates:
_a : Union[str, Any] = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(__a ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
return sorted(__a , key=lambda __a : s["title"].lower() )
def UpperCAmelCase_ (__a : Optional[int]=False ):
"""simple docstring"""
with open(__a , encoding='utf-8' ) as f:
_a : Tuple = yaml.safe_load(f.read() )
# Get to the API doc
_a : Union[str, Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_a : Union[str, Any] = content[api_idx]['sections']
# Then to the model doc
_a : List[str] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
_a : List[str] = api_doc[model_idx]['sections']
_a : List[Any] = [(idx, section) for idx, section in enumerate(__a ) if 'sections' in section]
_a : Tuple = False
for idx, modality_doc in modalities_docs:
_a : List[Any] = modality_doc['sections']
_a : Any = clean_model_doc_toc(__a )
if old_modality_doc != new_modality_doc:
_a : Union[str, Any] = True
if overwrite:
_a : str = new_modality_doc
if diff:
if overwrite:
_a : Dict = model_doc
_a : Dict = api_doc
with open(__a , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(__a , allow_unicode=__a ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
__lowerCAmelCase = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 5 | 0 |
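The cleaner above counts `local` keys, collapses duplicates into a single entry, and re-sorts by lower-cased title. A standalone sketch of the dedupe-then-sort core (it skips the conflicting-title validation the real script performs):

def dedupe_and_sort(docs: list) -> list:
    # Keep one entry per 'local' key (first occurrence wins), then sort
    # case-insensitively by title.
    seen = set()
    out = []
    for doc in docs:
        if doc["local"] not in seen:
            seen.add(doc["local"])
            out.append(doc)
    return sorted(out, key=lambda d: d["title"].lower())

docs = [{"local": "bert", "title": "BERT"}, {"local": "bert", "title": "BERT"}, {"local": "albert", "title": "ALBERT"}]
print(dedupe_and_sort(docs))  # ALBERT first, one BERT entry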
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__lowerCAmelCase = Lock()
def UpperCAmelCase_ (__a : Union[str, Any] , __a : Optional[Any] , __a : Optional[int] , __a : Optional[int] , __a : List[Any] , __a : Union[str, Any] , __a : Union[str, Any] ):
"""simple docstring"""
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 1_0 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(__a )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
_a : List[Any] = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
_a : List[str] = min(__a , __a )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(__a )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
_a : Dict = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
_a : Union[str, Any] = max(__a , __a )
# after all swaps are performed, send the values back to main
result_pipe[1].send(__a )
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : int = []
_a : List[Any] = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
_a : Dict = Pipe()
_a : Optional[Any] = Pipe()
process_array_.append(
Process(
target=__a , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
_a : List[Any] = temp_rs
_a : Optional[int] = temp_rr
for i in range(1 , len(__a ) - 1 ):
_a : int = Pipe()
_a : List[str] = Pipe()
process_array_.append(
Process(
target=__a , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
_a : List[str] = temp_rs
_a : str = temp_rr
process_array_.append(
Process(
target=__a , args=(
len(__a ) - 1,
arr[len(__a ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(__a ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(__a ) ):
_a : List[str] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Optional[int] = list(range(1_0 , 0 , -1 ) )
print('Initial List' )
print(*__a )
_a : Tuple = odd_even_transposition(__a )
print('Sorted List\n' )
print(*__a )
if __name__ == "__main__":
main()
| 364 |
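The multiprocessing version above assigns one process per element and exchanges values over pipes. Stripped of the parallel plumbing, the comparator schedule is just alternating even and odd phases; a sequential sketch:

def odd_even_transposition(arr: list) -> list:
    # Even phases compare pairs (0,1), (2,3), ...; odd phases compare
    # (1,2), (3,4), ... After len(arr) phases the list is sorted.
    n = len(arr)
    for phase in range(n):
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

print(odd_even_transposition(list(range(10, 0, -1))))  # [1, 2, ..., 10]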
'''simple docstring'''
from collections.abc import Generator
from math import sin
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
if len(__a ) != 3_2:
raise ValueError('Input must be of length 32' )
_a : Any = b''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
_a : List[str] = format(__a , '08x' )[-8:]
_a : str = b''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
_a : List[Any] = b''
for char in message:
bit_string += format(__a , '08b' ).encode('utf-8' )
_a : int = format(len(__a ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__a ) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
return bit_string
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
if len(__a ) % 5_1_2 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(__a ) , 5_1_2 ):
_a : List[Any] = bit_string[pos : pos + 5_1_2]
_a : str = []
for i in range(0 , 5_1_2 , 3_2 ):
block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
yield block_words
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
_a : List[str] = format(__a , '032b' )
_a : int = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__a , 2 )
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
return (a + b) % 2**3_2
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
_a : str = preprocess(__a )
_a : Optional[int] = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
# Starting states
_a : int = 0x67_45_23_01
_a : Union[str, Any] = 0xEF_CD_AB_89
_a : str = 0x98_BA_DC_FE
_a : List[Any] = 0x10_32_54_76
_a : Optional[int] = [
7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2,
5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0,
4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3,
6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__a ):
_a : Union[str, Any] = aa
_a : List[Any] = ba
_a : List[Any] = ca
_a : Dict = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_a : Optional[int] = d ^ (b & (c ^ d))
_a : Optional[Any] = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_a : Optional[Any] = c ^ (d & (b ^ c))
_a : Dict = (5 * i + 1) % 1_6
elif i <= 4_7:
_a : Optional[Any] = b ^ c ^ d
_a : Dict = (3 * i + 5) % 1_6
else:
_a : int = c ^ (b | not_aa(__a ))
_a : List[str] = (7 * i) % 1_6
_a : Optional[int] = (f + a + added_consts[i] + block_words[g]) % 2**3_2
_a : Union[str, Any] = d
_a : Tuple = c
_a : Optional[int] = b
_a : Union[str, Any] = sum_aa(__a , left_rotate_aa(__a , shift_amounts[i] ) )
# Add hashed chunk to running total
_a : Any = sum_aa(__a , __a )
_a : Dict = sum_aa(__a , __a )
_a : Union[str, Any] = sum_aa(__a , __a )
_a : str = sum_aa(__a , __a )
_a : Optional[Any] = reformat_hex(__a ) + reformat_hex(__a ) + reformat_hex(__a ) + reformat_hex(__a )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 | 0 |
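The digest loop above is a full MD5 implementation; the simplest way to validate one is against `hashlib` on a known test vector. A quick check, plus the 32-bit rotate that every round depends on:

import hashlib

def left_rotate_32(x: int, shift: int) -> int:
    # 32-bit circular left shift, the core primitive of each MD5 round.
    x &= 0xFFFFFFFF
    return ((x << shift) | (x >> (32 - shift))) & 0xFFFFFFFF

assert left_rotate_32(0x80000000, 1) == 1
# Reference digest for the classic test vector:
print(hashlib.md5(b"abc").hexdigest())  # 900150983cd24fb0d6963f7d28e17f72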
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@parameterized.expand([(None,), ('foo.json',)] )
def __lowercase ( self : Union[str, Any] ,_a : Optional[Any] ):
'''simple docstring'''
_a : List[str] = GenerationConfig(
do_sample=_a ,temperature=0.7 ,length_penalty=1.0 ,bad_words_ids=[[1, 2, 3], [4, 5]] ,)
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a ,config_name=_a )
_a : Any = GenerationConfig.from_pretrained(_a ,config_name=_a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample ,_a )
self.assertEqual(loaded_config.temperature ,0.7 )
self.assertEqual(loaded_config.length_penalty ,1.0 )
self.assertEqual(loaded_config.bad_words_ids ,[[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k ,50 )
self.assertEqual(loaded_config.max_length ,20 )
self.assertEqual(loaded_config.max_time ,_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Union[str, Any] = AutoConfig.from_pretrained('gpt2' )
_a : Any = GenerationConfig.from_model_config(_a )
_a : List[Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(_a ,_a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id ,default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id ,model_config.eos_token_id )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Any = GenerationConfig()
_a : List[str] = {
'max_new_tokens': 1024,
'foo': 'bar',
}
_a : int = copy.deepcopy(_a )
_a : str = generation_config.update(**_a )
# update_kwargs was not modified (no side effects)
self.assertEqual(_a ,_a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens ,1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(_a ,{'foo': 'bar'} )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Any = GenerationConfig()
_a : List[str] = 'bar'
with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
generation_config.save_pretrained(_a )
_a : Optional[int] = GenerationConfig.from_pretrained(_a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo ,'bar' )
_a : Dict = GenerationConfig.from_model_config(_a )
assert not hasattr(_a ,'foo' ) # no new kwargs should be initialized if from config
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[Any] = GenerationConfig()
self.assertEqual(default_config.temperature ,1.0 )
self.assertEqual(default_config.do_sample ,_a )
self.assertEqual(default_config.num_beams ,1 )
_a : Tuple = GenerationConfig(
do_sample=_a ,temperature=0.7 ,length_penalty=1.0 ,bad_words_ids=[[1, 2, 3], [4, 5]] ,)
self.assertEqual(config.temperature ,0.7 )
self.assertEqual(config.do_sample ,_a )
self.assertEqual(config.num_beams ,1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a )
_a : Optional[Any] = GenerationConfig.from_pretrained(_a ,temperature=1.0 )
self.assertEqual(loaded_config.temperature ,1.0 )
self.assertEqual(loaded_config.do_sample ,_a )
self.assertEqual(loaded_config.num_beams ,1 ) # default value
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase ( cls : Tuple ):
'''simple docstring'''
_a : int = TOKEN
HfFolder.save_token(_a )
@classmethod
def __lowercase ( cls : Any ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Optional[int] = GenerationConfig(
do_sample=_a ,temperature=0.7 ,length_penalty=1.0 ,)
config.push_to_hub('test-generation-config' ,use_auth_token=self._token )
_a : Any = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a ,repo_id='test-generation-config' ,push_to_hub=_a ,use_auth_token=self._token )
_a : int = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : List[Any] = GenerationConfig(
do_sample=_a ,temperature=0.7 ,length_penalty=1.0 ,)
config.push_to_hub('valid_org/test-generation-config-org' ,use_auth_token=self._token )
_a : str = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a ,repo_id='valid_org/test-generation-config-org' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Optional[int] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
| 365 |
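The tests above exercise a JSON-backed config round trip (`save_pretrained` / `from_pretrained`) plus keyword overrides at load time. A toy sketch of that contract, without the validation and Hub I/O the real class adds:

import json

class ToyConfig:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def save_pretrained(self, path: str) -> None:
        with open(path, "w") as f:
            json.dump(self.__dict__, f)

    @classmethod
    def from_pretrained(cls, path: str, **overrides) -> "ToyConfig":
        with open(path) as f:
            attrs = json.load(f)
        attrs.update(overrides)  # load-time kwargs win, as in the tests
        return cls(**attrs)

cfg = ToyConfig(do_sample=True, temperature=0.7)
cfg.save_pretrained("cfg.json")
assert ToyConfig.from_pretrained("cfg.json", temperature=1.0).temperature == 1.0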
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def UpperCAmelCase_ (__a : str , __a : Dict=0.999 , __a : List[str]="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__a : Union[str, Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__a : int ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
_a : Tuple = []
for i in range(__a ):
_a : Union[str, Any] = i / num_diffusion_timesteps
_a : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__a ) / alpha_bar_fn(__a ) , __a ) )
return torch.tensor(__a , dtype=torch.floataa )
class UpperCAmelCase__ ( lowercase__ , lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : Dict = 2
@register_to_config
def __init__( self : str ,_a : int = 1000 ,_a : float = 0.0_0085 ,_a : float = 0.012 ,_a : str = "linear" ,_a : Optional[Union[np.ndarray, List[float]]] = None ,_a : str = "epsilon" ,_a : Optional[bool] = False ,_a : Optional[bool] = False ,_a : float = 1.0 ,_a : str = "linspace" ,_a : int = 0 ,):
'''simple docstring'''
if trained_betas is not None:
_a : List[str] = torch.tensor(_a ,dtype=torch.floataa )
elif beta_schedule == "linear":
_a : Tuple = torch.linspace(_a ,_a ,_a ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a : List[str] = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,_a ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a : Dict = betas_for_alpha_bar(_a ,alpha_transform_type='cosine' )
elif beta_schedule == "exp":
_a : Tuple = betas_for_alpha_bar(_a ,alpha_transform_type='exp' )
else:
raise NotImplementedError(F"""{beta_schedule} is not implemented for {self.__class__}""" )
_a : Optional[Any] = 1.0 - self.betas
_a : Optional[int] = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(_a ,_a ,_a )
_a : Optional[int] = use_karras_sigmas
def __lowercase ( self : Any ,_a : Union[str, Any] ,_a : Optional[Any]=None ):
'''simple docstring'''
if schedule_timesteps is None:
_a : List[Any] = self.timesteps
_a : Dict = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_a : int = 1 if len(_a ) > 1 else 0
else:
_a : str = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
_a : str = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowercase ( self : int ,_a : torch.FloatTensor ,_a : Union[float, torch.FloatTensor] ,):
'''simple docstring'''
_a : List[Any] = self.index_for_timestep(_a )
_a : Tuple = self.sigmas[step_index]
_a : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowercase ( self : Any ,_a : int ,_a : Union[str, torch.device] = None ,_a : Optional[int] = None ,):
'''simple docstring'''
_a : Optional[Any] = num_inference_steps
_a : Dict = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_a : Optional[Any] = np.linspace(0 ,num_train_timesteps - 1 ,_a ,dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_a : str = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a : int = (np.arange(0 ,_a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_a : Any = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a : Union[str, Any] = (np.arange(_a ,0 ,-step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_a : Tuple = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_a : Union[str, Any] = np.log(_a )
_a : str = np.interp(_a ,np.arange(0 ,len(_a ) ) ,_a )
if self.config.use_karras_sigmas:
_a : List[Any] = self._convert_to_karras(in_sigmas=_a ,num_inference_steps=self.num_inference_steps )
_a : Dict = np.array([self._sigma_to_t(_a ,_a ) for sigma in sigmas] )
_a : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_a : Union[str, Any] = torch.from_numpy(_a ).to(device=_a )
_a : Any = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
_a : List[Any] = torch.from_numpy(_a )
_a : List[str] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(_a ).startswith('mps' ):
# mps does not support float64
_a : Tuple = timesteps.to(_a ,dtype=torch.floataa )
else:
_a : Dict = timesteps.to(device=_a )
# empty dt and derivative
_a : Tuple = None
_a : Optional[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_a : Union[str, Any] = defaultdict(_a )
def __lowercase ( self : str ,_a : Dict ,_a : Dict ):
'''simple docstring'''
_a : Optional[int] = np.log(_a )
# get distribution
_a : Union[str, Any] = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
_a : List[Any] = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
_a : Tuple = low_idx + 1
_a : Union[str, Any] = log_sigmas[low_idx]
_a : Optional[Any] = log_sigmas[high_idx]
# interpolate sigmas
_a : Optional[Any] = (low - log_sigma) / (low - high)
_a : List[str] = np.clip(_a ,0 ,1 )
# transform interpolation to time range
_a : Union[str, Any] = (1 - w) * low_idx + w * high_idx
_a : List[str] = t.reshape(sigma.shape )
return t
def __lowercase ( self : int ,_a : torch.FloatTensor ,_a : Tuple ):
'''simple docstring'''
_a : float = in_sigmas[-1].item()
_a : float = in_sigmas[0].item()
_a : Tuple = 7.0 # 7.0 is the value used in the paper
_a : str = np.linspace(0 ,1 ,_a )
_a : Optional[Any] = sigma_min ** (1 / rho)
_a : Union[str, Any] = sigma_max ** (1 / rho)
_a : str = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return self.dt is None
    def step( self ,model_output: Union[torch.FloatTensor, np.ndarray] ,timestep: Union[float, torch.FloatTensor] ,sample: Union[torch.FloatTensor, np.ndarray] ,return_dict: bool = True ,):
        """Predict the sample at the previous timestep using Heun's second-order method."""
        step_index = self.index_for_timestep(timestep )
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`""" )
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range ,self.config.clip_sample_range )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise( self ,original_samples: torch.FloatTensor ,noise: torch.FloatTensor ,timesteps: torch.FloatTensor ,):
        """Add noise to `original_samples` at the sigma level matching each timestep."""
        sigmas = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps ):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device ,dtype=torch.float32 )
            timesteps = timesteps.to(original_samples.device ,dtype=torch.float32 )
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device )
            timesteps = timesteps.to(original_samples.device )
        step_indices = [self.index_for_timestep(t ,schedule_timesteps ) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            sigma = sigma.unsqueeze(-1 )
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self : Optional[int] ):
'''simple docstring'''
return self.config.num_train_timesteps
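# Illustrative only: a minimal sketch of how a scheduler like the one above is
# typically driven in a denoising loop. The names `scheduler` and `unet` are
# assumptions for illustration, and `init_noise_sigma`/`scale_model_input`
# follow the usual diffusers scheduler API rather than anything defined in this
# file. Because all but the first timestep appear twice in `scheduler.timesteps`,
# `step` alternates between the first-order (Euler) half and the second-order
# (Heun correction) half of each update.
#
#   scheduler.set_timesteps(num_inference_steps=50, device="cpu")
#   sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_output = unet(scheduler.scale_model_input(sample, t), t).sample
#       sample = scheduler.step(model_output, t, sample).prev_sample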
| 5 | 0 |
'''simple docstring'''
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : Any ,_a : Optional[int] ,_a : Dict=13 ,_a : Union[str, Any]=7 ,_a : List[str]=True ,_a : Optional[Any]=True ,_a : Union[str, Any]=False ,_a : Optional[int]=True ,_a : Dict=99 ,_a : str=32 ,_a : str=5 ,_a : Any=4 ,_a : int=64 ,_a : Optional[int]="gelu" ,_a : List[str]=0.1 ,_a : Optional[Any]=0.1 ,_a : Union[str, Any]=512 ,_a : int=16 ,_a : Any=2 ,_a : int=0.02 ,_a : List[Any]=3 ,_a : List[Any]=4 ,_a : int=None ,_a : Tuple=2 ,_a : Optional[Any]=2 ,_a : Optional[int]=2 ,_a : int=2 ,_a : str=4 ,_a : Tuple=1 ,):
'''simple docstring'''
_a : int = parent
_a : Dict = batch_size
_a : Optional[int] = seq_length
_a : Optional[int] = is_training
_a : Tuple = use_input_mask
_a : List[Any] = use_token_type_ids
_a : Tuple = use_labels
_a : Dict = vocab_size
_a : Dict = hidden_size
_a : Tuple = num_hidden_layers
_a : int = num_attention_heads
_a : Optional[int] = intermediate_size
_a : str = hidden_act
_a : Optional[Any] = hidden_dropout_prob
_a : Tuple = attention_probs_dropout_prob
_a : Any = max_position_embeddings
_a : List[str] = type_vocab_size
_a : Optional[int] = type_sequence_label_size
_a : Any = initializer_range
_a : int = num_labels
_a : Optional[int] = num_choices
_a : int = scope
_a : Dict = q_groups
_a : str = k_groups
_a : Dict = v_groups
_a : str = post_attention_groups
_a : List[str] = intermediate_groups
_a : List[str] = output_groups
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase ( self : List[str] ):
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size ,vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,attention_probs_dropout_prob=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,q_groups=self.q_groups ,k_groups=self.k_groups ,v_groups=self.v_groups ,post_attention_groups=self.post_attention_groups ,intermediate_groups=self.intermediate_groups ,output_groups=self.output_groups ,)
def __lowercase ( self : Optional[Any] ,_a : List[Any] ,_a : str ,_a : Tuple ,_a : List[Any] ,_a : Dict ,_a : Union[str, Any] ):
'''simple docstring'''
_a : Tuple = SqueezeBertModel(config=_a )
model.to(_a )
model.eval()
_a : Any = model(_a ,_a )
_a : Dict = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : Optional[Any] ,_a : Any ,_a : int ,_a : Optional[Any] ,_a : Optional[int] ,_a : Union[str, Any] ,_a : List[str] ):
'''simple docstring'''
_a : Dict = SqueezeBertForMaskedLM(config=_a )
model.to(_a )
model.eval()
_a : List[Any] = model(_a ,attention_mask=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase ( self : Optional[Any] ,_a : Optional[int] ,_a : List[str] ,_a : Union[str, Any] ,_a : Dict ,_a : Dict ,_a : List[Any] ):
'''simple docstring'''
_a : List[Any] = SqueezeBertForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
_a : Tuple = model(
_a ,attention_mask=_a ,start_positions=_a ,end_positions=_a )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __lowercase ( self : List[str] ,_a : Any ,_a : Optional[int] ,_a : Tuple ,_a : Dict ,_a : Tuple ,_a : str ):
'''simple docstring'''
_a : Any = self.num_labels
_a : Optional[Any] = SqueezeBertForSequenceClassification(_a )
model.to(_a )
model.eval()
_a : int = model(_a ,attention_mask=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowercase ( self : List[Any] ,_a : List[Any] ,_a : Tuple ,_a : Tuple ,_a : int ,_a : int ,_a : Dict ):
'''simple docstring'''
_a : Any = self.num_labels
_a : Optional[Any] = SqueezeBertForTokenClassification(config=_a )
model.to(_a )
model.eval()
_a : Any = model(_a ,attention_mask=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __lowercase ( self : List[str] ,_a : Optional[Any] ,_a : Tuple ,_a : int ,_a : str ,_a : List[str] ,_a : str ):
'''simple docstring'''
_a : Union[str, Any] = self.num_choices
_a : Optional[Any] = SqueezeBertForMultipleChoice(config=_a )
model.to(_a )
model.eval()
_a : int = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_a : Any = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_a : List[Any] = model(
_a ,attention_mask=_a ,labels=_a ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def __lowercase ( self : Any ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
__UpperCAmelCase : List[str] = (
{
'''feature-extraction''': SqueezeBertModel,
'''fill-mask''': SqueezeBertForMaskedLM,
'''question-answering''': SqueezeBertForQuestionAnswering,
'''text-classification''': SqueezeBertForSequenceClassification,
'''token-classification''': SqueezeBertForTokenClassification,
'''zero-shot''': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Dict = False
__UpperCAmelCase : Dict = True
__UpperCAmelCase : Optional[Any] = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
        self.model_tester = SqueezeBertModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=SqueezeBertConfig ,dim=37 )
def __lowercase ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_a )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_a )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_a )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_a )
@slow
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Dict = SqueezeBertModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self : int ):
'''simple docstring'''
        model = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
        input_ids = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 3) )
        self.assertEqual(output.shape ,expected_shape )
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]] )
        self.assertTrue(torch.allclose(output ,expected_tensor ,atol=1E-4 ) )
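        # Note: this is a "golden value" regression test. The expected logits were
        # recorded once from a known-good run of the released checkpoint, and
        # `atol=1E-4` guards against numerical drift across library or hardware
        # changes rather than asserting anything about model quality.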
| 366 |
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int , classical_bits: int ):
    """Measure a single qubit on the simulator and return the histogram of results."""
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1_0_0_0 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
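# The returned counts object behaves like a dict mapping measured bitstrings to
# frequencies; for the trivial circuit above (no gates before measurement) every
# shot reads '0', i.e. {'0': 1000}.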
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 5 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_mobilebert_fast"""] = ["""MobileBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mobilebert"""] = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_mobilebert"""] = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
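# The pattern above is the transformers lazy-import convention: `_import_structure`
# maps submodule names to their public symbols, and `_LazyModule` defers the heavy
# submodule imports until an attribute is first accessed, so `import transformers`
# stays cheap even with optional torch/TF backends installed. The `TYPE_CHECKING`
# branch gives static type checkers the real imports without that runtime cost.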
| 367 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
    def test_stable_diffusion_inpaint_pipeline( self ):
        '''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        model_id = 'xvjiarui/stable-diffusion-2-inpainting'
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id ,safety_checker=None )
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        prng_seed = jax.random.PRNGKey(0 )
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt ,init_image ,mask_image )
        # shard inputs and rng
        params = replicate(params )
        prng_seed = jax.random.split(prng_seed ,jax.device_count() )
        prompt_ids = shard(prompt_ids )
        processed_masked_images = shard(processed_masked_images )
        processed_masks = shard(processed_masks )
        output = pipeline(
            prompt_ids ,processed_masks ,processed_masked_images ,params ,prng_seed ,num_inference_steps ,jit=True )
        images = output.images.reshape(num_samples ,512 ,512 ,3 )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
        print(F"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
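    # Note on the sharding pattern above: `replicate` copies the params pytree to
    # every local device, while `shard` splits the leading (batch) axis across
    # devices, which is the layout a `jit=True` (pmap-compiled) Flax pipeline call
    # expects; the per-device outputs come back stacked, hence the reshape to
    # (num_samples, 512, 512, 3).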
| 5 | 0 |
'''simple docstring'''
def solution(n: int = 4_0_0_0_0_0_0 ):
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    even_fibs : list = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a, b = b, a + b
    return sum(even_fibs )
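# Worked example: the Fibonacci terms up to 100 are 1, 1, 2, 3, 5, 8, 13, 21,
# 34, 55, 89; the even-valued ones are 2, 8 and 34, so solution(100) == 44.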
if __name__ == "__main__":
print(f'''{solution() = }''')
| 368 |
'''simple docstring'''
def match_pattern(input_string: str , pattern: str ):
    """Return True if `input_string` matches the regex-like `pattern` ('.' and '*')."""
    len_string = len(input_string ) + 1
    len_pattern = len(pattern ) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern )] for j in range(len_string )]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1 , len_string ):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1 , len_pattern ):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '*' else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1 , len_string ):
        for j in range(1 , len_pattern ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1] )
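# Hand-checked cases: match_pattern('aab', 'c*a*b') is True ('c*' consumes
# nothing and 'a*' matches 'aa'), while match_pattern('aa', 'a') is False.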
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
__lowerCAmelCase = """aab"""
__lowerCAmelCase = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
| 5 | 0 |
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : List[Any] ,_a : Optional[int] ,_a : int=13 ,_a : Optional[int]=7 ,_a : str=True ,_a : Union[str, Any]=True ,_a : List[str]=True ,_a : int=True ,_a : str=99 ,_a : str=24 ,_a : str=2 ,_a : Tuple=6 ,_a : str=37 ,_a : Any="gelu" ,_a : Tuple=0.1 ,_a : Any=0.1 ,_a : Optional[Any]=512 ,_a : Union[str, Any]=16 ,_a : Optional[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Any=3 ,_a : Union[str, Any]=None ,_a : List[Any]=1000 ,):
'''simple docstring'''
_a : int = parent
_a : List[str] = batch_size
_a : List[str] = seq_length
_a : str = is_training
_a : Union[str, Any] = use_input_mask
_a : Dict = use_token_type_ids
_a : int = use_labels
_a : Any = vocab_size
_a : List[Any] = hidden_size
_a : List[str] = num_hidden_layers
_a : Dict = num_attention_heads
_a : Union[str, Any] = intermediate_size
_a : int = hidden_act
_a : List[Any] = hidden_dropout_prob
_a : Dict = attention_probs_dropout_prob
_a : Union[str, Any] = max_position_embeddings
_a : str = type_vocab_size
_a : Dict = type_sequence_label_size
_a : Optional[Any] = initializer_range
_a : Any = num_labels
_a : List[Any] = scope
_a : Dict = range_bbox
def __lowercase ( self : Any ):
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] ,self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
        config = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
def __lowercase ( self : Optional[Any] ,_a : List[Any] ,_a : str ,_a : int ,_a : Dict ,_a : List[Any] ,_a : str ,_a : Optional[int] ,):
'''simple docstring'''
_a : List[Any] = LiltModel(config=_a )
model.to(_a )
model.eval()
_a : Any = model(_a ,bbox=_a ,attention_mask=_a ,token_type_ids=_a )
_a : Optional[Any] = model(_a ,bbox=_a ,token_type_ids=_a )
_a : List[Any] = model(_a ,bbox=_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def __lowercase ( self : int ,_a : int ,_a : List[str] ,_a : Union[str, Any] ,_a : str ,_a : Optional[int] ,_a : Tuple ,_a : List[str] ,):
'''simple docstring'''
_a : str = self.num_labels
_a : str = LiltForTokenClassification(config=_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(
_a ,bbox=_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __lowercase ( self : Optional[Any] ,_a : Union[str, Any] ,_a : Union[str, Any] ,_a : List[Any] ,_a : Tuple ,_a : Any ,_a : List[str] ,_a : Union[str, Any] ,):
'''simple docstring'''
_a : int = LiltForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
_a : List[Any] = model(
_a ,bbox=_a ,attention_mask=_a ,token_type_ids=_a ,start_positions=_a ,end_positions=_a ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __lowercase ( self : str ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : Tuple = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Dict = False
def __lowercase ( self : Union[str, Any] ,_a : Optional[int] ,_a : Tuple ,_a : Any ,_a : Dict ,_a : Dict ):
'''simple docstring'''
return True
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
        self.model_tester = LiltModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=LiltConfig ,hidden_size=37 )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
@slow
def __lowercase ( self : List[Any] ):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = LiltModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : str ):
'''simple docstring'''
        model = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(torch_device )
        input_ids = torch.tensor([[1, 2]] ,device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] ,device=torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids ,bbox=bbox )
        expected_shape = torch.Size([1, 2, 768] )
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] ,device=torch_device ,)
        self.assertEqual(outputs.last_hidden_state.shape ,expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] ,expected_slice ,atol=1E-3 ) )
| 369 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        vocab = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
        vocab_tokens = dict(zip(vocab ,range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
        self.special_tokens_map = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer( self ,**kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**kwargs )
    def get_input_output_texts( self ,tokenizer ):
        '''simple docstring'''
        input_text = 'adapt act apte'
        output_text = 'adapt act apte'
        return input_text, output_text
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
        text = 'adapt act apte'
        bpe_tokens = ['adapt', 'act', 'ap@@', 'te']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens ,bpe_tokens )
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) ,input_bpe_tokens )
    def test_special_tokens_small_tok( self ):
        '''simple docstring'''
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
        assert tok('sam' ).input_ids == [1384]
        src_text = 'I am a small frog.'
        encoded = tok([src_text] ,padding=False ,truncation=True )['input_ids']
        decoded = tok.batch_decode(encoded ,skip_special_tokens=True ,clean_up_tokenization_spaces=True )[0]
        assert src_text != decoded # I wish it did!
        assert decoded == "i am a small frog ."
    def test_empty_word_small_tok( self ):
        '''simple docstring'''
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
        src_text = 'I am a small frog .'
        src_text_dot = '.'
        encoded = tok(src_text )['input_ids']
        encoded_dot = tok(src_text_dot )['input_ids']
        assert encoded[-1] == encoded_dot[0]
| 5 | 0 |
'''simple docstring'''
import argparse
import datetime
def zeller(date_input: str ):
    """Return the day of the week for a date given as 'mm-dd-yyyy' or 'mm/dd/yyyy'."""
    days = {
        '0': 'Sunday',
        '1': 'Monday',
        '2': 'Tuesday',
        '3': 'Wednesday',
        '4': 'Thursday',
        '5': 'Friday',
        '6': 'Saturday',
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input ) < 1_1:
        raise ValueError('Must be 10 characters long' )
    # Get month
    m : int = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 1_3:
        raise ValueError('Month must be between 1 - 12' )
    sep_a : str = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'' )
    # Get day
    d : int = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 3_2:
        raise ValueError('Date must be between 1 - 31' )
    # Get second separator
    sep_a = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'' )
    # Get year
    y : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 4_5 < y < 8_5_0_0:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?' )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y ) , int(m ) , int(d ) )
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 1_2
    # maths var
    c : int = int(str(y )[:2] )
    k : int = int(str(y )[2:] )
    t : int = int(2.6 * m - 5.39 )
    u : int = int(c / 4 )
    v : int = int(k / 4 )
    x : int = int(d + k )
    z : int = int(t + u + v + x )
    w : int = int(z - (2 * c) )
    f : int = round(w % 7 )
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.' )
    # Response
    response : str = f"""Your date {date_input}, is a {days[str(f )]}!"""
    return response
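# Example: zeller('01-31-2010') returns "Your date 01-31-2010, is a Sunday!"
# (January 1, 2010 was a Friday, and 30 days later lands on a Sunday).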
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
__lowerCAmelCase = parser.parse_args()
zeller(args.date_input)
| 370 |
'''simple docstring'''
__lowerCAmelCase = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
__lowerCAmelCase = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str ):
    """Encode a message into Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def decrypt(message: str ):
    """Decode a Morse-code message back into text."""
    return "".join(REVERSE_DICT[char] for char in message.split() )
def main():
    """simple docstring"""
    message = 'Morse code here!'
    print(message )
    message = encrypt(message )
    print(message )
    message = decrypt(message )
    print(message )
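# Example round trip: encrypt('SOS') -> '... --- ...' and
# decrypt('... --- ...') -> 'SOS'.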
if __name__ == "__main__":
main()
| 5 | 0 |
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int , classical_bits: int ):
    """Measure a single qubit on the simulator and return the histogram of results."""
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1_0_0_0 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 371 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase ( cls : Optional[Any] ):
'''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def __lowercase ( cls : List[Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-config' )
except HTTPError:
pass
def __lowercase ( self : List[str] ):
'''simple docstring'''
        config = BertConfig(
            vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
        config.push_to_hub('test-config' ,use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(F"""{USER}/test-config""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v ,getattr(new_config ,k ) )
        # Reset repo
        delete_repo(token=self._token ,repo_id='test-config' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir ,repo_id='test-config' ,push_to_hub=True ,use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(F"""{USER}/test-config""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v ,getattr(new_config ,k ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
        config = BertConfig(
            vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
        config.push_to_hub('valid_org/test-config-org' ,use_auth_token=self._token )
        new_config = BertConfig.from_pretrained('valid_org/test-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v ,getattr(new_config ,k ) )
        # Reset repo
        delete_repo(token=self._token ,repo_id='valid_org/test-config-org' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir ,repo_id='valid_org/test-config-org' ,push_to_hub=True ,use_auth_token=self._token )
        new_config = BertConfig.from_pretrained('valid_org/test-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v ,getattr(new_config ,k ) )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42 )
        config.push_to_hub('test-dynamic-config' ,use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map ,{'AutoConfig': 'custom_configuration.CustomConfig'} )
        new_config = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" ,trust_remote_code=True )
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__ ,'CustomConfig' )
        self.assertEqual(new_config.attribute ,42 )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
        c = GPTaConfig()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + 'foo'  # str
        c.update_from_string(
            F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
        self.assertEqual(n_embd ,c.n_embd ,'mismatch for key: n_embd' )
        self.assertEqual(resid_pdrop ,c.resid_pdrop ,'mismatch for key: resid_pdrop' )
        self.assertEqual(scale_attn_weights ,c.scale_attn_weights ,'mismatch for key: scale_attn_weights' )
        self.assertEqual(summary_type ,c.summary_type ,'mismatch for key: summary_type' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in `config_common_kwargs` above.
        self.assertListEqual(
            missing_keys ,['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config ,key )]
        if len(keys_with_defaults ) > 0:
            raise ValueError(
                'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs` pick another value for them:'
                F""" {', '.join(keys_with_defaults )}.""" )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
        with self.assertRaises(OSError ):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ,subfolder='bert' )
        self.assertIsNotNone(config )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request' ,return_value=response_mock ) as mock_head:
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This checks that we did call the fake head request
            mock_head.assert_called()
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
        configuration = AutoConfig.from_pretrained('bert-base-cased' )
        configuration.configuration_files = ['config.4.0.0.json']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir )
            configuration.hidden_size = 2
            json.dump(configuration.to_dict() ,open(os.path.join(tmp_dir ,'config.4.0.0.json' ) ,'w' ) )
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size ,2 )
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['config.42.0.0.json']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir )
            shutil.move(os.path.join(tmp_dir ,'config.4.0.0.json' ) ,os.path.join(tmp_dir ,'config.42.0.0.json' ) )
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size ,768 )
def __lowercase ( self : str ):
'''simple docstring'''
        repo = 'hf-internal-testing/test-two-configs'
        import transformers as new_transformers
        new_transformers.configuration_utils.__version__ = 'v4.0.0'
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo ,return_unused_kwargs=True )
        self.assertEqual(new_configuration.hidden_size ,2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs ,{} )
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers
        old_transformers.configuration_utils.__version__ = 'v3.0.0'
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo )
        self.assertEqual(old_configuration.hidden_size ,768 )
| 5 | 0 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __lt__( self : Optional[int] ,_a : Any ):
'''simple docstring'''
return self[-1] < other[-1]
def __eq__( self : List[str] ,_a : int ):
'''simple docstring'''
return self[-1] == other[-1]
def UpperCAmelCase_ (__a : list ):
_a : list[Stack] = []
# sort into stacks
for element in collection:
_a : str = Stack([element] )
_a : Optional[int] = bisect_left(__a , __a )
if i != len(__a ):
stacks[i].append(__a )
else:
stacks.append(__a )
# use a heap-based merge to merge stack efficiently
_a : Union[str, Any] = merge(*(reversed(__a ) for stack in stacks) )
return collection
if __name__ == "__main__":
__lowerCAmelCase = input("""Enter numbers separated by a comma:\n""").strip()
__lowerCAmelCase = [int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted))
| 350 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
__lowerCAmelCase = {
"""169M""": 1_2,
"""430M""": 2_4,
"""1B5""": 2_4,
"""3B""": 3_2,
"""7B""": 3_2,
"""14B""": 4_0,
}
__lowerCAmelCase = {
"""169M""": 7_6_8,
"""430M""": 1_0_2_4,
"""1B5""": 2_0_4_8,
"""3B""": 2_5_6_0,
"""7B""": 4_0_9_6,
"""14B""": 5_1_2_0,
}
def convert_state_dict(state_dict ):
    """simple docstring"""
    state_dict_keys = list(state_dict.keys() )
    for name in state_dict_keys:
        weight = state_dict.pop(name )
        # emb -> embedding
        if name.startswith('emb.' ):
            name = name.replace('emb.' , 'embeddings.' )
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('blocks.0.ln0' ):
            name = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
        # att -> attention
        name = re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , name )
        # ffn -> feed_forward
        name = re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , name )
        # time_mix_k -> time_mix_key and reshape
        if name.endswith('.time_mix_k' ):
            name = name.replace('.time_mix_k' , '.time_mix_key' )
        # time_mix_v -> time_mix_value and reshape
        if name.endswith('.time_mix_v' ):
            name = name.replace('.time_mix_v' , '.time_mix_value' )
        # time_mix_r -> time_mix_receptance and reshape
        if name.endswith('.time_mix_r' ):
            name = name.replace('.time_mix_r' , '.time_mix_receptance' )
        if name != "head.weight":
            name = 'rwkv.' + name
        state_dict[name] = weight
    return state_dict
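# Example of the renaming above: 'blocks.0.att.key.weight' becomes
# 'rwkv.blocks.0.attention.key.weight', while 'head.weight' keeps its name and
# is the only key left without the 'rwkv.' prefix.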
def convert_rmkv_checkpoint_to_hf_format(
    repo_id ,checkpoint_file ,output_dir ,size=None ,tokenizer_file=None ,push_to_hub=False ,model_name=None ):
    """simple docstring"""
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
        vocab_size = 5_0_2_7_7
        tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file )
        vocab_size = len(tokenizer )
    tokenizer.save_pretrained(output_dir )
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
    if size not in possible_sizes:
        raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
    config = RwkvConfig(
        vocab_size=vocab_size , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
    config.save_pretrained(output_dir )
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id , checkpoint_file )
    state_dict = torch.load(model_file , map_location='cpu' )
    state_dict = convert_state_dict(state_dict )
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict )
    for shard_file, shard in shards.items():
        torch.save(shard , os.path.join(output_dir , shard_file ) )
    if index is not None:
        save_index_file = os.path.join(output_dir , WEIGHTS_INDEX_NAME )
        # Save the index as well
        with open(save_index_file , 'w' , encoding='utf-8' ) as f:
            content = json.dumps(index , indent=2 , sort_keys=True ) + '\n'
            f.write(content )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        'Cleaning up shards. This may error with an OOM error; if this is the case, don\'t worry, you still have converted the model.' )
    shard_files = list(shards.keys() )
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir , shard_file ) )
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(output_dir , shard_file ) )
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
        model = AutoModelForCausalLM.from_pretrained(output_dir )
        model.push_to_hub(model_name , max_shard_size='2GB' )
        tokenizer.push_to_hub(model_name )
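# Illustrative invocation (the script filename, repo id and checkpoint name are
# placeholders, not verified values):
#   python convert_rwkv_checkpoint_to_hf.py --repo_id BlinkDL/rwkv-4-pile-169m \
#       --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth \
#       --output_dir ./rwkv-169m --size 169M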
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
__lowerCAmelCase = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 5 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_xlm"""] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_xlm"""] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 351 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
__lowerCAmelCase = datasets.logging.get_logger(__name__)
__lowerCAmelCase = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
__lowerCAmelCase = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
__lowerCAmelCase = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://unbabel.github.io/COMET/html/index.html' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'sources': datasets.Value('string' ,id='sequence' ),
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/Unbabel/COMET'] ,reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6',
] ,)
def __lowercase ( self : int ,_a : int ):
'''simple docstring'''
if self.config_name == "default":
_a : List[Any] = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) )
else:
_a : List[str] = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def __lowercase ( self : Tuple ,_a : List[Any] ,_a : Dict ,_a : Optional[Any] ,_a : List[str]=None ,_a : Tuple=False ):
'''simple docstring'''
if gpus is None:
_a : str = 1 if torch.cuda.is_available() else 0
_a : Optional[Any] = {'src': sources, 'mt': predictions, 'ref': references}
_a : Optional[Any] = [dict(zip(_a ,_a ) ) for t in zip(*data.values() )]
_a, _a : Tuple = self.scorer.predict(_a ,gpus=_a ,progress_bar=_a )
return {"mean_score": mean_score, "scores": scores}
| 5 | 0 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Repeat each example once per choice, as the multiple-choice head expects.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
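A hedged note on running this suite: in a standard transformers checkout the file usually lives under tests/models/mpnet/ (the exact path is an assumption), and slow integration tests are gated behind the RUN_SLOW environment variable:
pytest tests/models/mpnet/test_modeling_mpnet.py
RUN_SLOW=1 pytest tests/models/mpnet/test_modeling_mpnet.py -k "inference"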
| 352 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
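The net effect of the registration above is that heavy submodules load only on first attribute access. A minimal sketch of that idea (hypothetical `LazyModule`, not the actual transformers._LazyModule implementation):
import importlib
import types


class LazyModule(types.ModuleType):
    """Defer submodule imports until an exported name is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Only called when normal lookup fails, so the mapping itself is safe.
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)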
| 5 | 0 |
'''simple docstring'''
from collections import deque
def tarjan(g):
    """
    Tarjan's algorithm: return the strongly connected components of the
    directed graph `g` (adjacency list), each component as a list of vertices.
    """
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
            elif on_stack[w]:
                lowlink_of[v] = lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
        if lowlink_of[v] == index_of[v]:
            # v is the root of an SCC: pop the whole component off the stack.
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components
def create_graph(n, edges):
    """Build an adjacency list for `n` vertices from a list of (u, v) edges."""
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
__lowerCAmelCase = 7
__lowerCAmelCase = [0, 0, 1, 2, 3, 3, 4, 4, 6]
__lowerCAmelCase = [1, 3, 2, 0, 1, 4, 5, 6, 5]
__lowerCAmelCase = [(u, v) for u, v in zip(source, target)]
__lowerCAmelCase = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
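A second sanity check, as a minimal sketch using the two functions above: a directed cycle collapses to a single component, and vertices come out in reverse push order.
# A 3-cycle is one strongly connected component, popped as [2, 1, 0].
cycle = create_graph(3, [(0, 1), (1, 2), (2, 0)])
assert tarjan(cycle) == [[2, 1, 0]]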
| 353 |
'''simple docstring'''
import sys
def matrix_chain_order(array):
    n = len(array)
    # matrix[a][b] = minimal number of scalar multiplications for the chain a..b;
    # sol[a][b] = split point c that achieves it.
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")
def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of Operations required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
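For reference, these dimensions are the classic CLRS matrix-chain example, whose known optimum is 15125 scalar multiplications with parenthesization ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ); a quick check of that expected output:
# Sanity check against the known CLRS optimum for [30, 35, 15, 5, 10, 20, 25].
matrix, optimal_solution = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
assert matrix[1][6] == 15125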
| 5 | 0 |