from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    """Configuration class to store the configuration of a MobileNetV1 model."""

    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    """ONNX export configuration for MobileNetV1."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self):
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self):
        return 1e-4
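

# Minimal usage sketch (added for illustration, not part of the original
# module): constructing a config for the 0.75-depth, 192-pixel variant.
def _example_mobilenet_config():
    config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
    assert config.min_depth == 8  # untouched defaults are preserved
    return config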
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """ \"\"\"
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample (x_{0}) based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    \"\"\"

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
"""


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
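

# Illustrative addition (not part of the test file): what the checker consumes
# is a source string whose class carries a "# Copied from ..." annotation;
# `is_copy_consistent` re-derives the expected body from the original and diffs it.
def _example_annotated_source():
    return (
        "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test\n"
        "class TestSchedulerOutput(nn.Module):\n" + re.sub("DDPM", "Test", REFERENCE_CODE)
    )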
from __future__ import annotations

def is_palindrome(n: int | str) -> bool:
    """Return True if `n` reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1000000) -> int:
    """Return the sum of all numbers below `limit` that are palindromic in base 10 and base 2 (Project Euler 36)."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
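

# Quick sanity check (added for illustration): 585 is 1001001001 in binary,
# a palindrome in both bases, so it must contribute to the sum.
def _check_double_base_palindrome():
    assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])
    assert solution(586) >= 585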
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    """Configuration for training the model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate schedule type."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})


@dataclass
class EvaluationArguments:
    """Configuration for evaluating the model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class HumanEvalArguments:
    """Configuration for running evaluation on the HumanEval benchmark."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "File where the evaluation results are saved."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )


@dataclass
class PreprocessingArguments:
    """Configuration for preprocessing the dataset."""

    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save the processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot",
        metadata={"help": "Name or path to the tokenizer."},
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )


@dataclass
class TokenizerTrainingArguments:
    """Configuration for tokenizer training."""

    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    n_examples: Optional[int] = field(
        default=200000, metadata={"help": "Number of examples to train tokenizer on."}
    )
    vocab_size: Optional[int] = field(
        default=32768, metadata={"help": "Vocabulary size of the new tokenizer."}
    )
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class PretokenizationArguments:
    """Configuration for data pretokenization."""

    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for pretokenization."})


@dataclass
class InitializationArguments:
    """Configuration for initializing a new model."""

    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved model to the hub."})
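

# Hedged usage sketch (added for illustration): these dataclasses are designed
# to be consumed by transformers' HfArgumentParser, which turns each field into
# a CLI flag. The import and call pattern here are assumptions based on that
# standard usage, not part of the original file.
def _parse_training_args():
    from transformers import HfArgumentParser

    parser = HfArgumentParser(TrainingArguments)
    return parser.parse_args_into_dataclasses()[0]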
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]


@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode an example into a format storable by Arrow."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode an encoded example back into a PIL image."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten it into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=storage.is_null()
            )
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed the referenced image files as bytes into the Arrow array."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)


def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image to bytes using its native compression if possible, else PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            if np.dtype(dest_dtype_str) in _VALID_IMAGE_ARRAY_DTPYES:
                dest_dtype = np.dtype(dest_dtype_str)
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
        )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
    """Encode a list of objects into a format suitable for creating an extension array."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
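

# Illustrative round-trip sketch (added, not part of the original module;
# assumes Pillow is installed): encode a small uint8 array, then decode it
# back into a PIL image.
def _example_image_roundtrip():
    feature = Image()
    arr = np.zeros((4, 4, 3), dtype=np.uint8)
    encoded = feature.encode_example(arr)  # -> {"path": None, "bytes": <PNG bytes>}
    decoded = feature.decode_example(encoded)
    assert decoded.size == (4, 4)
    return decoded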
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        filepath = os.path.join(self.tmpdirname, "file.npz")
        np.savez(filepath, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=filepath)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
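

# Hedged usage sketch (added for illustration; the exact output keys are an
# assumption inferred from the assertions above): a processor call mirrors the
# tokenizer call plus an optional `history_prompt` built from the voice preset.
def _example_bark_processor():
    processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
    inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
    return sorted(inputs.keys())  # expected to include "input_ids" and "history_prompt"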
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
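

# Toy sketch (added for illustration): `cosine_distance` above returns pairwise
# cosine similarities between two batches of embeddings.
def _example_cosine_distance():
    image_embeds = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
    concept_embeds = torch.tensor([[1.0, 0.0]])
    sims = cosine_distance(image_embeds, concept_embeds)  # shape (2, 1): [[1.0], [0.0]]
    return sims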
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
    "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
    "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
    "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
    "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
    "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
    "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
    "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
    "bert-large-uncased-whole-word-masking": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking": (
        "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
    "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
    "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-cased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-uncased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
    ),
    "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
    # See all BERT models at https://huggingface.co/models?filter=bert
}


class BertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a BERT model."""

    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
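

# Minimal usage sketch (added for illustration): the defaults reproduce the
# bert-base architecture; overriding a few fields yields smaller variants.
def _example_bert_config():
    config = BertConfig(num_hidden_layers=4, hidden_size=256, num_attention_heads=4)
    assert config.vocab_size == 30522  # unchanged default
    return config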
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)


def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders to test with"

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


class DummyModel(nn.Module):
    "Simple model to do y=mx+b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b


class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state; with total_limit=1 only one checkpoint should remain
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states; with total_limit=2 only the last two should remain:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))

    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
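

# Minimal sketch (added for illustration) of the save/load cycle the tests
# above exercise, outside of unittest; the directory layout assumes
# `automatic_checkpoint_naming=True` as used throughout this file.
def _example_checkpoint_cycle(tmpdir="/tmp/accelerate_example"):
    config = ProjectConfiguration(automatic_checkpoint_naming=True)
    accelerator = Accelerator(project_dir=tmpdir, project_config=config)
    model = accelerator.prepare(DummyModel())
    accelerator.save_state()  # writes tmpdir/checkpoints/checkpoint_0
    accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
    return model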
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between two (torch or numpy) vectors."""
    inputs_are_torch = False  # fixed: previously only set inside the branch below
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly parallel: fall back to linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
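

# Toy sketch (added for illustration): slerp walks the great circle between
# two vectors, so the midpoint of two orthogonal unit vectors keeps unit norm.
def _example_slerp():
    v0 = torch.tensor([1.0, 0.0])
    v1 = torch.tensor([0.0, 1.0])
    return slerp(0.5, v0, v1)  # ~[0.7071, 0.7071]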
def spherical_dist_loss(x, y):
    """Squared geodesic distance between L2-normalized embeddings."""
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def __init__( self: Any , _UpperCAmelCase: AutoencoderKL , _UpperCAmelCase: CLIPTextModel , _UpperCAmelCase: CLIPModel , _UpperCAmelCase: CLIPTokenizer , _UpperCAmelCase: UNetaDConditionModel , _UpperCAmelCase: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , _UpperCAmelCase: CLIPFeatureExtractor , _UpperCAmelCase: str=None , _UpperCAmelCase: Tuple=None , _UpperCAmelCase: Union[str, Any]=None , ):
super().__init__()
self.register_modules(
vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , clip_model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , coca_model=_UpperCAmelCase , coca_tokenizer=_UpperCAmelCase , coca_transform=_UpperCAmelCase , )
_lowerCAmelCase :int = (
feature_extractor.size
if isinstance(feature_extractor.size , _UpperCAmelCase )
else feature_extractor.size['shortest_edge']
)
_lowerCAmelCase :Union[str, Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _UpperCAmelCase )
set_requires_grad(self.clip_model , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase :Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
self.enable_attention_slicing(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
set_requires_grad(self.vae , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
set_requires_grad(self.vae , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
set_requires_grad(self.unet , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
set_requires_grad(self.unet , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Dict ):
# get the original timestep using init_timestep
_lowerCAmelCase :Optional[Any] = min(int(num_inference_steps * strength ) , _UpperCAmelCase )
_lowerCAmelCase :List[str] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase :Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Union[str, Any]=None ):
if not isinstance(_UpperCAmelCase , torch.Tensor ):
raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(_UpperCAmelCase )}""" )
_lowerCAmelCase :Union[str, Any] = image.to(device=_UpperCAmelCase , dtype=_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :List[Any] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCAmelCase )
]
_lowerCAmelCase :List[str] = torch.cat(_UpperCAmelCase , dim=0 )
else:
_lowerCAmelCase :List[str] = self.vae.encode(_UpperCAmelCase ).latent_dist.sample(_UpperCAmelCase )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase :List[Any] = 0.1_8_2_1_5 * init_latents
_lowerCAmelCase :List[Any] = init_latents.repeat_interleave(_UpperCAmelCase , dim=0 )
_lowerCAmelCase :Dict = randn_tensor(init_latents.shape , generator=_UpperCAmelCase , device=_UpperCAmelCase , dtype=_UpperCAmelCase )
# get latents
_lowerCAmelCase :Dict = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :List[str] = init_latents
return latents
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Union[str, Any] ):
_lowerCAmelCase :Optional[int] = self.coca_transform(_UpperCAmelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_lowerCAmelCase :Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_lowerCAmelCase :int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: List[str] ):
_lowerCAmelCase :Optional[int] = self.feature_extractor.preprocess(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
_lowerCAmelCase :List[str] = self.clip_model.get_image_features(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase )
_lowerCAmelCase :Dict = image_embeddings_clip.repeat_interleave(_UpperCAmelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , _UpperCAmelCase: Dict , _UpperCAmelCase: str , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , ):
_lowerCAmelCase :Dict = latents.detach().requires_grad_()
_lowerCAmelCase :Optional[Any] = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
_lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_lowerCAmelCase :int = self.scheduler.alphas_cumprod[timestep]
_lowerCAmelCase :Optional[int] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase :str = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_lowerCAmelCase :Optional[Any] = torch.sqrt(_UpperCAmelCase )
_lowerCAmelCase :List[str] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _UpperCAmelCase ):
_lowerCAmelCase :Dict = self.scheduler.sigmas[index]
_lowerCAmelCase :Optional[Any] = latents - sigma * noise_pred
else:
raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase :Tuple = 1 / 0.1_8_2_1_5 * sample
_lowerCAmelCase :Optional[Any] = self.vae.decode(_UpperCAmelCase ).sample
_lowerCAmelCase :List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase :Tuple = transforms.Resize(self.feature_extractor_size )(_UpperCAmelCase )
_lowerCAmelCase :Tuple = self.normalize(_UpperCAmelCase ).to(latents.dtype )
_lowerCAmelCase :List[Any] = self.clip_model.get_image_features(_UpperCAmelCase )
_lowerCAmelCase :List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase )
_lowerCAmelCase :Tuple = spherical_dist_loss(_UpperCAmelCase , _UpperCAmelCase ).mean() * clip_guidance_scale
_lowerCAmelCase :str = -torch.autograd.grad(_UpperCAmelCase , _UpperCAmelCase )[0]
if isinstance(self.scheduler , _UpperCAmelCase ):
_lowerCAmelCase :Union[str, Any] = latents.detach() + grads * (sigma**2)
_lowerCAmelCase :Dict = noise_pred_original
else:
_lowerCAmelCase :Optional[int] = noise_pred_original - torch.sqrt(_UpperCAmelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self: Optional[int] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: float = 0.6 , _UpperCAmelCase: Optional[int] = 50 , _UpperCAmelCase: Optional[float] = 7.5 , _UpperCAmelCase: Optional[int] = 1 , _UpperCAmelCase: float = 0.0 , _UpperCAmelCase: Optional[float] = 100 , _UpperCAmelCase: Optional[torch.Generator] = None , _UpperCAmelCase: Optional[str] = "pil" , _UpperCAmelCase: bool = True , _UpperCAmelCase: float = 0.8 , _UpperCAmelCase: float = 0.1 , _UpperCAmelCase: float = 0.1 , ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(_UpperCAmelCase )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(_UpperCAmelCase , torch.Generator ) and batch_size > 1:
_lowerCAmelCase :int = [generator] + [None] * (batch_size - 1)
_lowerCAmelCase :List[Any] = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
_lowerCAmelCase :Optional[int] = [x[0] for x in coca_is_none if x[1]]
_lowerCAmelCase :List[str] = ', '.join(_UpperCAmelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :List[Any] = self.get_image_description(_UpperCAmelCase )
if style_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :Any = self.get_image_description(_UpperCAmelCase )
# get prompt text embeddings for content and style
_lowerCAmelCase :Any = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :str = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :int = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :Union[str, Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :List[str] = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# duplicate text embeddings for each generation per prompt
_lowerCAmelCase :str = text_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 )
# set timesteps
_lowerCAmelCase :Any = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_lowerCAmelCase :Dict = {}
if accepts_offset:
_lowerCAmelCase :Optional[int] = 1
self.scheduler.set_timesteps(_UpperCAmelCase , **_UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device )
_lowerCAmelCase , _lowerCAmelCase :List[str] = self.get_timesteps(_UpperCAmelCase , _UpperCAmelCase , self.device )
_lowerCAmelCase :int = timesteps[:1].repeat(_UpperCAmelCase )
# Preprocess image
_lowerCAmelCase :Dict = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :int = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :Any = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :str = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if clip_guidance_scale > 0:
_lowerCAmelCase :Optional[Any] = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Dict = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Any = slerp(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase :int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase :Optional[int] = content_text_input.input_ids.shape[-1]
_lowerCAmelCase :Union[str, Any] = self.tokenizer([''] , padding='max_length' , max_length=_UpperCAmelCase , return_tensors='pt' )
_lowerCAmelCase :Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_lowerCAmelCase :Optional[int] = uncond_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase :int = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase :Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase :Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowerCAmelCase :Any = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device='cpu' , dtype=_UpperCAmelCase ).to(
self.device )
else:
_lowerCAmelCase :List[Any] = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
_lowerCAmelCase :int = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase :Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase :Any = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase :Any = {}
if accepts_eta:
_lowerCAmelCase :Any = eta
# check if the scheduler accepts generator
_lowerCAmelCase :List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_lowerCAmelCase :List[Any] = generator
with self.progress_bar(total=_UpperCAmelCase ):
for i, t in enumerate(_UpperCAmelCase ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase :Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase :Tuple = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
_lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase :List[str] = noise_pred.chunk(2 )
_lowerCAmelCase :Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_lowerCAmelCase :List[Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_lowerCAmelCase , _lowerCAmelCase :List[str] = self.cond_fn(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase :str = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase :str = 1 / 0.1_8_2_1_5 * latents
_lowerCAmelCase :Any = self.vae.decode(_UpperCAmelCase ).sample
_lowerCAmelCase :List[str] = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase :Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCAmelCase :List[Any] = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image, None)
        return StableDiffusionPipelineOutput(images=_UpperCAmelCase , nsfw_content_detected=_UpperCAmelCase )
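# Hedged usage sketch for the pipeline above (not part of the original file).
# The checkpoint ids, the community-pipeline id, and the image paths are
# placeholders/assumptions, not values taken from this file.
def _example_images_mixing():
    clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch16", torch_dtype=torch.float16)
    feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch16")
    pipe = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        custom_pipeline="clip_guided_images_mixing_stable_diffusion",  # assumed id
        clip_model=clip_model,
        feature_extractor=feature_extractor,
        torch_dtype=torch.float16,
    ).to("cuda")
    style_image = PIL.Image.open("style.jpg").convert("RGB")      # placeholder path
    content_image = PIL.Image.open("content.jpg").convert("RGB")  # placeholder path
    generator = torch.Generator(device="cuda").manual_seed(17)
    # the first two positional images are assumed to be (style, content)
    return pipe(style_image, content_image, generator=generator).images[0]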
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
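# Outside the test harness, the same tool can be driven directly. A minimal
# hedged sketch; the input text and labels are arbitrary examples.
def _example_text_classification_tool():
    classifier = load_tool("text-classification")
    classifier.setup()
    return classifier("That's quite cool", ["positive", "negative"])  # -> "positive"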
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two minterm strings that differ in exactly one position.

    Returns the merged string with "_" at the differing position, or
    False when the strings differ in more than one position.
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge terms until only prime implicants remain."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Convert each minterm into its binary string representation."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True when the two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select the essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    # a column covered by exactly one implicant marks that implicant essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    # take the essential implicants and clear the columns they cover
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily pick the implicant covering the most remaining columns
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart of prime implicants against minterms."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
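# Non-interactive sketch of the building blocks above (illustrative, not part
# of the original script):
def _example_building_blocks():
    # a single differing bit merges into "_"; multiple differences return False
    assert compare_string("0101", "0001") == "0_01"
    assert compare_string("0110", "1001") is False
    # minterms are fed in as floats, hence the unusual-looking strings
    assert decimal_to_binary(3, [1.5]) == ["0.00.01.5"]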
from sklearn.metrics import mean_squared_error
import datasets
a = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
a = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
a = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ (datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float' ) ),
"references": datasets.Sequence(datasets.Value('float' ) ),
}
else:
return {
"predictions": datasets.Value('float' ),
"references": datasets.Value('float' ),
}
def SCREAMING_SNAKE_CASE__ ( self: str , _UpperCAmelCase: List[Any] , _UpperCAmelCase: int , _UpperCAmelCase: List[Any]=None , _UpperCAmelCase: Union[str, Any]="uniform_average" , _UpperCAmelCase: Any=True ):
_lowerCAmelCase :Any = mean_squared_error(
_UpperCAmelCase , _UpperCAmelCase , sample_weight=_UpperCAmelCase , multioutput=_UpperCAmelCase , squared=_UpperCAmelCase )
return {"mse": mse} | 687 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
a = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
a = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account.
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
a = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ (datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[int]=4 , _UpperCAmelCase: Optional[int]=False ):
_lowerCAmelCase :Any = compute_bleu(
reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) :Tuple = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
        }
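# Illustrative helper (not part of the metric): the brevity penalty term that
# BLEU multiplies into the geometric mean of n-gram precisions,
# BP = 1 if c > r else exp(1 - r / c), with c the translation length and r
# the effective reference length.
def _brevity_penalty(translation_length: int, reference_length: int) -> float:
    import math

    if translation_length > reference_length:
        return 1.0
    return math.exp(1 - reference_length / translation_length)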
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
a = logging.get_logger(__name__)
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
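# The `_LazyModule` indirection above defers the heavy torch imports until an
# attribute is first accessed. A stripped-down sketch of the same idea
# (illustrative only, not transformers' actual implementation):
import importlib
import types


class _MiniLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._name_to_submodule = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._name_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._name_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)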
from __future__ import annotations
import requests
valid_terms = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch posts from a subreddit via Reddit's public JSON endpoint.

    subreddit : subreddit to query
    limit : number of posts to fetch
    age : sort order, e.g. "new", "top" or "hot"
    wanted_data : restrict the returned fields to this subset of valid_terms
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"""Invalid search term: {invalid_search_terms}"""
        raise ValueError(msg)
    response = requests.get(
        f"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""",
        headers={'User-agent': 'A random string'},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data['data']['children'][id_]['data'][item] for item in wanted_data
        }
    return data_dict
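# Hedged convenience wrapper (not part of the original): retry with
# exponential backoff instead of aborting on a 429 response.
def get_subreddit_data_with_retry(subreddit: str, retries: int = 3, **kwargs) -> dict:
    import time

    for attempt in range(retries):
        try:
            return get_subreddit_data(subreddit, **kwargs)
        except requests.HTTPError:
            time.sleep(2**attempt)  # back off: 1s, 2s, 4s, ...
    raise requests.HTTPError("still rate limited after retries")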
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
    print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self: str , _UpperCAmelCase: str , _UpperCAmelCase: Optional[int]=7 , _UpperCAmelCase: Union[str, Any]=3 , _UpperCAmelCase: int=18 , _UpperCAmelCase: List[Any]=30 , _UpperCAmelCase: List[Any]=400 , _UpperCAmelCase: Optional[Any]=True , _UpperCAmelCase: Any=None , _UpperCAmelCase: Any=True , _UpperCAmelCase: int=None , _UpperCAmelCase: Union[str, Any]=True , ):
_lowerCAmelCase :Tuple = size if size is not None else {'shortest_edge': 20}
_lowerCAmelCase :str = crop_size if crop_size is not None else {'height': 18, 'width': 18}
_lowerCAmelCase :str = parent
_lowerCAmelCase :List[Any] = batch_size
_lowerCAmelCase :Optional[Any] = num_channels
_lowerCAmelCase :Optional[Any] = image_size
_lowerCAmelCase :int = min_resolution
_lowerCAmelCase :List[str] = max_resolution
_lowerCAmelCase :List[str] = do_resize
_lowerCAmelCase :Optional[int] = size
_lowerCAmelCase :str = do_center_crop
_lowerCAmelCase :int = crop_size
_lowerCAmelCase :Optional[int] = do_flip_channel_order
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Any = MobileViTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = MobileViTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self: str ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'center_crop' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_flip_channel_order' ) )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
_lowerCAmelCase :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self: int ):
# Initialize image_processing
_lowerCAmelCase :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
_lowerCAmelCase :Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :str = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
# Initialize image_processing
_lowerCAmelCase :int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
_lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :List[str] = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
# Initialize image_processing
_lowerCAmelCase :Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
_lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :int = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
            ) , )
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class UpperCAmelCase_ (snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCamelCase : str = 1
@register_to_config
def __init__( self: Optional[int] , _UpperCAmelCase: int = 1000 , _UpperCAmelCase: Optional[Union[np.ndarray, List[float]]] = None ):
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(_UpperCAmelCase )
# standard deviation of the initial noise distribution
_lowerCAmelCase :Union[str, Any] = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
_lowerCAmelCase :Optional[int] = 4
# running values
_lowerCAmelCase :Dict = []
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int , _UpperCAmelCase: Union[str, torch.device] = None ):
_lowerCAmelCase :int = num_inference_steps
_lowerCAmelCase :List[Any] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
_lowerCAmelCase :int = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
_lowerCAmelCase :List[str] = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
_lowerCAmelCase :str = torch.sin(steps * math.pi / 2 ) ** 2
_lowerCAmelCase :List[str] = (1.0 - self.betas**2) ** 0.5
_lowerCAmelCase :Optional[int] = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
_lowerCAmelCase :Tuple = timesteps.to(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = []
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: torch.FloatTensor , _UpperCAmelCase: int , _UpperCAmelCase: torch.FloatTensor , _UpperCAmelCase: bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
_lowerCAmelCase :Optional[int] = (self.timesteps == timestep).nonzero().item()
_lowerCAmelCase :List[Any] = timestep_index + 1
_lowerCAmelCase :Dict = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(_UpperCAmelCase )
if len(self.ets ) == 1:
_lowerCAmelCase :List[Any] = self.ets[-1]
elif len(self.ets ) == 2:
_lowerCAmelCase :List[str] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
_lowerCAmelCase :Optional[int] = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
_lowerCAmelCase :Union[str, Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
_lowerCAmelCase :Optional[int] = self._get_prev_sample(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: torch.FloatTensor , *_UpperCAmelCase: Optional[int] , **_UpperCAmelCase: Optional[int] ):
return sample
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: List[str] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Tuple ):
_lowerCAmelCase :List[Any] = self.alphas[timestep_index]
_lowerCAmelCase :Union[str, Any] = self.betas[timestep_index]
_lowerCAmelCase :List[Any] = self.alphas[prev_timestep_index]
_lowerCAmelCase :Any = self.betas[prev_timestep_index]
_lowerCAmelCase :Optional[int] = (sample - sigma * ets) / max(_UpperCAmelCase , 1e-8 )
_lowerCAmelCase :str = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self: Optional[Any] ):
        return self.config.num_train_timesteps
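# The `len(self.ets)` branching in `step` above applies explicit
# Adams-Bashforth multistep formulas of orders 1 through 4. The same
# coefficients, factored into a helper for illustration (not part of the
# scheduler):
def _adams_bashforth_combination(ets: list):
    # ets holds the past model outputs, most recent last
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24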
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ (datasets.BuilderConfig ):
"""simple docstring"""
lowerCamelCase : Optional[datasets.Features] = None
class UpperCAmelCase_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCamelCase : Any = PandasConfig
def SCREAMING_SNAKE_CASE__ ( self: int ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: List[str] ):
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_lowerCAmelCase :Dict = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_UpperCAmelCase , (str, list, tuple) ):
_lowerCAmelCase :Any = data_files
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase :List[Any] = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
_lowerCAmelCase :Any = []
for split_name, files in data_files.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :str = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase :Union[str, Any] = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_UpperCAmelCase , gen_kwargs={'files': files} ) )
return splits
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: pa.Table ):
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase :str = table_cast(_UpperCAmelCase , self.config.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: Dict ):
for i, file in enumerate(itertools.chain.from_iterable(_UpperCAmelCase ) ):
with open(_UpperCAmelCase , 'rb' ) as f:
_lowerCAmelCase :Optional[Any] = pa.Table.from_pandas(pd.read_pickle(_UpperCAmelCase ) )
            yield i, self._cast_table(_UpperCAmelCase )
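# Hedged usage sketch (not part of the builder): `load_dataset("pandas", ...)`
# dispatches to this builder for pickled DataFrame files; "data.pkl" is a
# placeholder path.
def _example_load_pickled_dataframe():
    from datasets import load_dataset

    return load_dataset("pandas", data_files="data.pkl")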
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : torch.FloatTensor
class UpperCAmelCase_ (snake_case__ , snake_case__ ):
"""simple docstring"""
@register_to_config
def __init__( self: Any , _UpperCAmelCase: int = 3 , _UpperCAmelCase: int = 3 , _UpperCAmelCase: Tuple[str] = ("DownEncoderBlock2D",) , _UpperCAmelCase: Tuple[str] = ("UpDecoderBlock2D",) , _UpperCAmelCase: Tuple[int] = (64,) , _UpperCAmelCase: int = 1 , _UpperCAmelCase: str = "silu" , _UpperCAmelCase: int = 3 , _UpperCAmelCase: int = 32 , _UpperCAmelCase: int = 256 , _UpperCAmelCase: int = 32 , _UpperCAmelCase: Optional[int] = None , _UpperCAmelCase: float = 0.1_8_2_1_5 , _UpperCAmelCase: str = "group" , ):
super().__init__()
# pass init params to Encoder
_lowerCAmelCase :Union[str, Any] = Encoder(
in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , down_block_types=_UpperCAmelCase , block_out_channels=_UpperCAmelCase , layers_per_block=_UpperCAmelCase , act_fn=_UpperCAmelCase , norm_num_groups=_UpperCAmelCase , double_z=_UpperCAmelCase , )
_lowerCAmelCase :str = vq_embed_dim if vq_embed_dim is not None else latent_channels
_lowerCAmelCase :Optional[int] = nn.Convad(_UpperCAmelCase , _UpperCAmelCase , 1 )
_lowerCAmelCase :str = VectorQuantizer(_UpperCAmelCase , _UpperCAmelCase , beta=0.2_5 , remap=_UpperCAmelCase , sane_index_shape=_UpperCAmelCase )
_lowerCAmelCase :Dict = nn.Convad(_UpperCAmelCase , _UpperCAmelCase , 1 )
# pass init params to Decoder
_lowerCAmelCase :str = Decoder(
in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , up_block_types=_UpperCAmelCase , block_out_channels=_UpperCAmelCase , layers_per_block=_UpperCAmelCase , act_fn=_UpperCAmelCase , norm_num_groups=_UpperCAmelCase , norm_type=_UpperCAmelCase , )
@apply_forward_hook
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: torch.FloatTensor , _UpperCAmelCase: bool = True ):
_lowerCAmelCase :int = self.encoder(_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = self.quant_conv(_UpperCAmelCase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=_UpperCAmelCase )
@apply_forward_hook
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: torch.FloatTensor , _UpperCAmelCase: bool = False , _UpperCAmelCase: bool = True ):
# also go through quantization layer
if not force_not_quantize:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Any = self.quantize(_UpperCAmelCase )
else:
_lowerCAmelCase :int = h
_lowerCAmelCase :Union[str, Any] = self.post_quant_conv(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.decoder(_UpperCAmelCase , quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: torch.FloatTensor , _UpperCAmelCase: bool = True ):
_lowerCAmelCase :Tuple = sample
_lowerCAmelCase :List[str] = self.encode(_UpperCAmelCase ).latents
_lowerCAmelCase :Dict = self.decode(_UpperCAmelCase ).sample
if not return_dict:
return (dec,)
        return DecoderOutput(sample=_UpperCAmelCase )
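# Sketch of the core lookup performed by `self.quantize` above (illustrative,
# not diffusers' actual VectorQuantizer): replace each latent vector by its
# nearest codebook entry, keeping gradients via a straight-through estimator.
def _nearest_codebook_lookup(z: torch.Tensor, codebook: torch.Tensor) -> torch.Tensor:
    # z: (n, d) latent vectors; codebook: (k, d) embedding table
    distances = torch.cdist(z, codebook)  # (n, k) pairwise L2 distances
    z_q = codebook[distances.argmin(dim=1)]  # nearest entry per vector
    return z + (z_q - z).detach()  # straight-through gradient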
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Collect image paths and their YOLO-format label boxes from a directory pair."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    img_list: list, anno_list: list, flip_type: int = 1
) -> tuple[list, list, list]:
    """Flip each image and mirror its normalized bounding-box centers accordingly."""
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """Generate a random string of lowercase letters and digits."""
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
def solution(n: int = 100) -> int:
    """Return the number of distinct terms a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)


if __name__ == "__main__":
    print("""Number of terms """, solution(int(str(input()).strip())))
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
a = logging.get_logger(__name__)
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = nn.functional.normalize(__magic_name__ )
_lowerCAmelCase :List[str] = nn.functional.normalize(__magic_name__ )
return torch.mm(__magic_name__ , normalized_text_embeds.t() )
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : str = CLIPConfig
lowerCamelCase : Any = ['CLIPEncoderLayer']
def __init__( self: Optional[int] , _UpperCAmelCase: CLIPConfig ):
super().__init__(_UpperCAmelCase )
_lowerCAmelCase :Any = CLIPVisionModel(config.vision_config )
_lowerCAmelCase :Optional[int] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_UpperCAmelCase )
_lowerCAmelCase :int = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :Any = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :str = nn.Parameter(torch.ones(17 ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = nn.Parameter(torch.ones(3 ) , requires_grad=_UpperCAmelCase )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Dict ):
_lowerCAmelCase :str = self.vision_model(_UpperCAmelCase )[1] # pooled_output
_lowerCAmelCase :Union[str, Any] = self.visual_projection(_UpperCAmelCase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowerCAmelCase :Optional[int] = cosine_distance(_UpperCAmelCase , self.special_care_embeds ).cpu().float().numpy()
_lowerCAmelCase :List[str] = cosine_distance(_UpperCAmelCase , self.concept_embeds ).cpu().float().numpy()
_lowerCAmelCase :str = []
_lowerCAmelCase :List[Any] = image_embeds.shape[0]
for i in range(_UpperCAmelCase ):
_lowerCAmelCase :Optional[Any] = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
_lowerCAmelCase :List[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
_lowerCAmelCase :List[Any] = special_cos_dist[i][concept_idx]
_lowerCAmelCase :Dict = self.special_care_embeds_weights[concept_idx].item()
_lowerCAmelCase :List[Any] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]} )
_lowerCAmelCase :Any = 0.0_1
for concept_idx in range(len(cos_dist[0] ) ):
_lowerCAmelCase :Union[str, Any] = cos_dist[i][concept_idx]
_lowerCAmelCase :str = self.concept_embeds_weights[concept_idx].item()
_lowerCAmelCase :str = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(_UpperCAmelCase )
result.append(_UpperCAmelCase )
_lowerCAmelCase :Any = [len(res['bad_concepts'] ) > 0 for res in result]
return images, has_nsfw_concepts
    @torch.no_grad()
    def SCREAMING_SNAKE_CASE__ ( self: str , clip_input: torch.FloatTensor , images: torch.FloatTensor ):
        pooled_output = self.vision_model(clip_input )[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output )
        special_cos_dist = cosine_distance(image_embeds , self.special_care_embeds )
        cos_dist = cosine_distance(image_embeds , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0 , dim=1 )
        special_adjustment = special_care * 0.0_1
        special_adjustment = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0 , dim=1 )
        return images, has_nsfw_concepts
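# The second forward pass above is the fully vectorized (ONNX-friendly)
# variant of the per-image Python loop: `torch.any(scores > 0, dim=1)` turns a
# (batch, num_concepts) score matrix into one boolean flag per image. A tiny
# illustration with hypothetical scores (not real model output):
#
#     >>> scores = torch.tensor([[-0.2, 0.1], [-0.3, -0.4]])
#     >>> torch.any(scores > 0, dim=1)
#     tensor([ True, False])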
from __future__ import annotations
from typing import Any
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: str , _UpperCAmelCase: int = 6 ):
_lowerCAmelCase :Node | None = None
_lowerCAmelCase :Node | None = None
self.create_linked_list(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: int ):
_lowerCAmelCase :str = Node()
_lowerCAmelCase :Optional[Any] = current_node
_lowerCAmelCase :Dict = current_node
_lowerCAmelCase :Dict = current_node
for _ in range(1 , _UpperCAmelCase ):
_lowerCAmelCase :List[str] = Node()
_lowerCAmelCase :Tuple = current_node
_lowerCAmelCase :List[str] = previous_node
_lowerCAmelCase :Any = current_node
_lowerCAmelCase :Union[str, Any] = self.front
_lowerCAmelCase :Any = previous_node
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
self.check_can_perform_operation()
return self.front.data if self.front else None
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Any ):
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
_lowerCAmelCase :str = self.rear.next
if self.rear:
_lowerCAmelCase :Union[str, Any] = data
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
_lowerCAmelCase :Dict = self.front.data
_lowerCAmelCase :Dict = None
return data
_lowerCAmelCase :Optional[Any] = self.front
_lowerCAmelCase :Any = old_front.next
_lowerCAmelCase :Union[str, Any] = old_front.data
_lowerCAmelCase :Dict = None
return data
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
if self.is_empty():
raise Exception('Empty Queue' )
def SCREAMING_SNAKE_CASE__ ( self: int ):
if self.rear and self.rear.next == self.front:
raise Exception('Full Queue' )
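# Intuition for the structure above: the nodes form a fixed-size ring built
# once in `create_linked_list`, `front` chases `rear` as items come and go,
# and the queue reports full exactly when `rear.next` would run back into
# `front` -- no node is ever allocated or freed after construction.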
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: Optional[Any] ):
_lowerCAmelCase :Any | None = None
_lowerCAmelCase :Node | None = None
_lowerCAmelCase :Node | None = None
if __name__ == "__main__":
import doctest
    doctest.testmod()
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_3_7_8_1_3_7.0
AXIS_B = 6_3_5_6_7_5_2.3_1_4_2_4_5
EQUATORIAL_RADIUS = 6_378_137
def UpperCamelCase_( lat1 : float , lon1 : float , lat2 : float , lon2 : float ):
    """simple docstring"""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    b_lat2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1 , lon1 , lat2 , lon2 ) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_denominator = cos(sigma / 2 ) ** 2
    x_value = (sigma - sin(sigma )) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator = sin(sigma / 2 ) ** 2
    y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
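# Example call shape (decimal degrees in, metres out). The exact figure also
# depends on the imported haversine helper, so treat the number as indicative
# only -- San Francisco to Yosemite comes out at roughly 254 km along the
# ellipsoid:
#
#     UpperCamelCase_(37.774856, -122.424227, 37.864742, -119.537521)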
if __name__ == "__main__":
import doctest
    doctest.testmod()
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"""facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : List[str] = 'levit'
def __init__( self: Dict , _UpperCAmelCase: List[Any]=224 , _UpperCAmelCase: List[str]=3 , _UpperCAmelCase: Dict=3 , _UpperCAmelCase: Any=2 , _UpperCAmelCase: Optional[Any]=1 , _UpperCAmelCase: List[Any]=16 , _UpperCAmelCase: Optional[Any]=[128, 256, 384] , _UpperCAmelCase: Tuple=[4, 8, 12] , _UpperCAmelCase: List[str]=[4, 4, 4] , _UpperCAmelCase: Optional[Any]=[16, 16, 16] , _UpperCAmelCase: int=0 , _UpperCAmelCase: Optional[int]=[2, 2, 2] , _UpperCAmelCase: List[str]=[2, 2, 2] , _UpperCAmelCase: Dict=0.0_2 , **_UpperCAmelCase: int , ):
super().__init__(**_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = image_size
_lowerCAmelCase :List[str] = num_channels
_lowerCAmelCase :Optional[Any] = kernel_size
_lowerCAmelCase :Union[str, Any] = stride
_lowerCAmelCase :List[Any] = padding
_lowerCAmelCase :Dict = hidden_sizes
_lowerCAmelCase :Optional[int] = num_attention_heads
_lowerCAmelCase :Tuple = depths
_lowerCAmelCase :str = key_dim
_lowerCAmelCase :List[Any] = drop_path_rate
_lowerCAmelCase :Tuple = patch_size
_lowerCAmelCase :Optional[int] = attention_ratio
_lowerCAmelCase :List[Any] = mlp_ratio
_lowerCAmelCase :Tuple = initializer_range
_lowerCAmelCase :Tuple = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Optional[int] = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
        return 1e-4
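# For ONNX export the two properties above are the whole contract: `inputs`
# declares a single 4-D `pixel_values` tensor with every axis dynamic, and
# validation runs at a tolerance of 1e-4. The final list built in the config's
# __init__ (named `down_ops` in upstream transformers) encodes the two
# "Subsample" shrink stages between the three hidden sizes.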
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Dict = 'encoder-decoder'
lowerCamelCase : Optional[Any] = True
def __init__( self: str , **_UpperCAmelCase: int ):
super().__init__(**_UpperCAmelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
_lowerCAmelCase :Optional[Any] = kwargs.pop('encoder' )
_lowerCAmelCase :Dict = encoder_config.pop('model_type' )
_lowerCAmelCase :str = kwargs.pop('decoder' )
_lowerCAmelCase :str = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
_lowerCAmelCase :str = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Tuple = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Any = True
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls: Tuple , _UpperCAmelCase: PretrainedConfig , _UpperCAmelCase: PretrainedConfig , **_UpperCAmelCase: str ):
logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
_lowerCAmelCase :Dict = True
_lowerCAmelCase :List[str] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowerCAmelCase :Optional[int] = self.encoder.to_dict()
_lowerCAmelCase :Union[str, Any] = self.decoder.to_dict()
_lowerCAmelCase :List[str] = self.__class__.model_type
        return output
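# Typical composition flow (sketch; upstream this class is transformers'
# EncoderDecoderConfig): the classmethod forces `is_decoder=True` and
# `add_cross_attention=True` on the decoder before bundling, e.g.
#
#     config = EncoderDecoderConfig.from_encoder_decoder_configs(bert_config, gpt2_config)
#
# after which `to_dict()` re-serializes both sub-configs plus the composite
# model_type, as implemented above.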
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
a = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def UpperCamelCase_( __magic_name__ : List[Any]=True ):
"""simple docstring"""
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
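# With with_config=True the helper above yields one named test case per
# (dataset, config) pair -- e.g. "wikipedia/20220301.de" -- while
# with_config=False deduplicates down to the bare dataset names.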
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=snake_case__ ) )
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : str = None
lowerCamelCase : Tuple = None
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Optional[Any] ):
with TemporaryDirectory() as tmp_dir:
_lowerCAmelCase :Dict = dataset_module_factory(_UpperCAmelCase , cache_dir=_UpperCAmelCase )
_lowerCAmelCase :Any = import_main_class(dataset_module.module_path , dataset=_UpperCAmelCase )
_lowerCAmelCase :DatasetBuilder = builder_cls(
cache_dir=_UpperCAmelCase , config_name=_UpperCAmelCase , hash=dataset_module.hash , )
_lowerCAmelCase :List[Any] = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=_UpperCAmelCase ).replace(os.sep , '/' ),
config.DATASET_INFO_FILENAME,
] )
_lowerCAmelCase :List[str] = cached_path(_UpperCAmelCase , cache_dir=_UpperCAmelCase )
self.assertTrue(os.path.exists(_UpperCAmelCase ) )
@pytest.mark.integration
def UpperCamelCase_( __magic_name__ : List[Any] ):
"""simple docstring"""
_lowerCAmelCase :List[str] = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
_lowerCAmelCase :List[str] = dataset_module_factory('wikipedia' , cache_dir=__magic_name__ )
_lowerCAmelCase :Union[str, Any] = import_main_class(dataset_module.module_path )
_lowerCAmelCase :DatasetBuilder = builder_cls(
cache_dir=__magic_name__ , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
_lowerCAmelCase :Optional[Any] = None
builder_instance.download_and_prepare()
_lowerCAmelCase :Union[str, Any] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def UpperCamelCase_( __magic_name__ : Any ):
"""simple docstring"""
_lowerCAmelCase :Tuple = dataset_module_factory('wikipedia' , cache_dir=__magic_name__ )
_lowerCAmelCase :List[str] = import_main_class(dataset_module.module_path , dataset=__magic_name__ )
_lowerCAmelCase :DatasetBuilder = builder_cls(
cache_dir=__magic_name__ , config_name='20220301.frr' , hash=dataset_module.hash , )
_lowerCAmelCase :int = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(__magic_name__ , __magic_name__ )
assert "train" in ds
assert isinstance(ds['train'] , __magic_name__ )
    assert next(iter(ds['train'] ) )
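# Note on the last test: `as_streaming_dataset()` returns an
# IterableDatasetDict keyed by split, so the assertions only pull the first
# record over the network instead of materializing the full Wikipedia dump.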
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: int , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=13 , _UpperCAmelCase: Optional[Any]=32 , _UpperCAmelCase: List[Any]=2 , _UpperCAmelCase: Optional[int]=3 , _UpperCAmelCase: Optional[int]=16 , _UpperCAmelCase: Optional[Any]=[32, 64, 128] , _UpperCAmelCase: Optional[int]=[1, 2, 1] , _UpperCAmelCase: int=[2, 2, 4] , _UpperCAmelCase: List[str]=2 , _UpperCAmelCase: Dict=2.0 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: str=0.0 , _UpperCAmelCase: int=0.0 , _UpperCAmelCase: str=0.1 , _UpperCAmelCase: Dict="gelu" , _UpperCAmelCase: Optional[Any]=False , _UpperCAmelCase: Union[str, Any]=True , _UpperCAmelCase: Union[str, Any]=0.0_2 , _UpperCAmelCase: Optional[int]=1e-5 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: Optional[Any]=None , _UpperCAmelCase: Tuple=True , _UpperCAmelCase: str=10 , _UpperCAmelCase: int=8 , _UpperCAmelCase: List[Any]=["stage1", "stage2"] , _UpperCAmelCase: List[Any]=[1, 2] , ):
_lowerCAmelCase :Optional[int] = parent
_lowerCAmelCase :Dict = batch_size
_lowerCAmelCase :Optional[Any] = image_size
_lowerCAmelCase :Optional[Any] = patch_size
_lowerCAmelCase :List[Any] = num_channels
_lowerCAmelCase :Optional[int] = embed_dim
_lowerCAmelCase :List[str] = hidden_sizes
_lowerCAmelCase :Union[str, Any] = depths
_lowerCAmelCase :int = num_heads
_lowerCAmelCase :Any = window_size
_lowerCAmelCase :List[Any] = mlp_ratio
_lowerCAmelCase :Optional[int] = qkv_bias
_lowerCAmelCase :Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase :Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase :Dict = drop_path_rate
_lowerCAmelCase :List[Any] = hidden_act
_lowerCAmelCase :Tuple = use_absolute_embeddings
_lowerCAmelCase :Optional[int] = patch_norm
_lowerCAmelCase :Optional[Any] = layer_norm_eps
_lowerCAmelCase :Union[str, Any] = initializer_range
_lowerCAmelCase :List[str] = is_training
_lowerCAmelCase :str = scope
_lowerCAmelCase :Optional[int] = use_labels
_lowerCAmelCase :List[Any] = type_sequence_label_size
_lowerCAmelCase :Union[str, Any] = encoder_stride
_lowerCAmelCase :Optional[int] = out_features
_lowerCAmelCase :List[str] = out_indices
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase :Dict = None
if self.use_labels:
_lowerCAmelCase :List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase :str = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self: int ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple ):
_lowerCAmelCase :List[Any] = FocalNetModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCAmelCase :List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Union[str, Any] = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCAmelCase :Optional[int] = None
_lowerCAmelCase :Dict = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Any = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Any = FocalNetForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCAmelCase :List[Any] = 1
_lowerCAmelCase :List[Any] = FocalNetForMaskedImageModeling(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :int = model(_UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int , _UpperCAmelCase: Dict , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = self.type_sequence_label_size
_lowerCAmelCase :Dict = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase :Optional[int] = 1
_lowerCAmelCase :Tuple = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = config_and_inputs
_lowerCAmelCase :List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
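# Shape bookkeeping for the tester defaults above: image_size=32 with
# patch_size=2 gives (32 // 2) ** 2 = 256 patches; three stages (depths
# [1, 2, 1]) downsample twice, so the expected final sequence length is
# 256 // 4 ** 2 = 16 and the expected final width is embed_dim * 2 ** 2 = 64.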
@require_torch
class UpperCAmelCase_ (snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase : Optional[Any] = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Tuple = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Any = False
lowerCamelCase : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = FocalNetModelTester(self )
_lowerCAmelCase :str = ConfigTester(self , config_class=_UpperCAmelCase , embed_dim=37 , has_text_modality=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
return
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def SCREAMING_SNAKE_CASE__ ( self: str ):
pass
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[Any] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase :Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase , _lowerCAmelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Tuple = model_class(_UpperCAmelCase )
_lowerCAmelCase :Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase :int = [*signature.parameters.keys()]
_lowerCAmelCase :List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: int , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase :Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
_lowerCAmelCase :List[Any] = outputs.hidden_states
_lowerCAmelCase :str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# FocalNet has a different seq_length
_lowerCAmelCase :Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_lowerCAmelCase :List[str] = outputs.reshaped_hidden_states
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :int = reshaped_hidden_states[0].shape
_lowerCAmelCase :Optional[int] = (
reshaped_hidden_states[0].view(_UpperCAmelCase , _UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[int] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Dict = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase , _lowerCAmelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :str = 3
_lowerCAmelCase :Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCAmelCase :int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCAmelCase :Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :List[str] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Union[str, Any] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
@slow
def SCREAMING_SNAKE_CASE__ ( self: int ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase :List[Any] = FocalNetModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :Optional[int] = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
_lowerCAmelCase :str = model_class(config=_UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Tuple = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.default_image_processor
_lowerCAmelCase :Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCAmelCase :Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase :Dict = model(**_UpperCAmelCase )
# verify the logits
_lowerCAmelCase :str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
_lowerCAmelCase :Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : int = (FocalNetBackbone,) if is_torch_available() else ()
lowerCamelCase : str = FocalNetConfig
lowerCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
        _lowerCAmelCase :Any = FocalNetModelTester(self )
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self: str , _UpperCAmelCase: str , _UpperCAmelCase: Optional[int]=7 , _UpperCAmelCase: Union[str, Any]=3 , _UpperCAmelCase: int=18 , _UpperCAmelCase: List[Any]=30 , _UpperCAmelCase: List[Any]=400 , _UpperCAmelCase: Optional[Any]=True , _UpperCAmelCase: Any=None , _UpperCAmelCase: Any=True , _UpperCAmelCase: int=None , _UpperCAmelCase: Union[str, Any]=True , ):
_lowerCAmelCase :Tuple = size if size is not None else {'shortest_edge': 20}
_lowerCAmelCase :str = crop_size if crop_size is not None else {'height': 18, 'width': 18}
_lowerCAmelCase :str = parent
_lowerCAmelCase :List[Any] = batch_size
_lowerCAmelCase :Optional[Any] = num_channels
_lowerCAmelCase :Optional[Any] = image_size
_lowerCAmelCase :int = min_resolution
_lowerCAmelCase :List[str] = max_resolution
_lowerCAmelCase :List[str] = do_resize
_lowerCAmelCase :Optional[int] = size
_lowerCAmelCase :str = do_center_crop
_lowerCAmelCase :int = crop_size
_lowerCAmelCase :Optional[int] = do_flip_channel_order
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Any = MobileViTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = MobileViTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self: str ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'center_crop' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_flip_channel_order' ) )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
_lowerCAmelCase :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self: int ):
# Initialize image_processing
_lowerCAmelCase :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
_lowerCAmelCase :Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :str = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
# Initialize image_processing
_lowerCAmelCase :int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
_lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :List[str] = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
# Initialize image_processing
_lowerCAmelCase :Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
_lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :int = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
        ) , )
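# All three input paths exercised above (PIL, numpy, torch) funnel through the
# same pipeline -- resize to shortest_edge=20, then center-crop to 18x18 -- so
# every `pixel_values` batch comes out as (batch_size, num_channels, 18, 18).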
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
api = HfApi()
results = {}
# fmt: off
a = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
a = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
a = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
a = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
a = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
a = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
a = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
a = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
a = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
a = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
a = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
a = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
a = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
a = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
a = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
models = api.list_models(filter="""diffusers""")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1]
        print(F'''Started running {mod.modelId}!!!''')
        if mod.modelId.startswith("""CompVis"""):
            model = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""")
        else:
            model = UNetaDModel.from_pretrained(local_checkpoint)
        torch.manual_seed(0)
        random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
        assert torch.allclose(
            logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3
        )
        print(F'''{mod.modelId} has passed successfully!!!''')
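# The check itself: seed the torch and python RNGs, run one denoising step at
# timestep 10 on unit-Gaussian noise, and compare the first 30 logits of the
# output sample against the stored reference row at atol=1e-3. The reference
# key is derived from the model id, e.g. "google/ddpm-cifar10-32" ->
# "google_ddpm_cifar10_32".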
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
a = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
a = {
"""vinai/phobert-base""": 256,
"""vinai/phobert-large""": 256,
}
def UpperCamelCase_( __magic_name__ : Dict ):
"""simple docstring"""
_lowerCAmelCase :Tuple = set()
_lowerCAmelCase :str = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase :Optional[Any] = char
_lowerCAmelCase :Any = set(__magic_name__ )
return pairs
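# Worked example: get_pairs("low") walks the symbols pairwise, so it returns
# {("l", "o"), ("o", "w")}.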
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : List[Any] = VOCAB_FILES_NAMES
lowerCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self: List[Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Any , _UpperCAmelCase: str="<s>" , _UpperCAmelCase: Optional[Any]="</s>" , _UpperCAmelCase: Dict="</s>" , _UpperCAmelCase: Optional[int]="<s>" , _UpperCAmelCase: List[str]="<unk>" , _UpperCAmelCase: str="<pad>" , _UpperCAmelCase: str="<mask>" , **_UpperCAmelCase: List[str] , ):
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , **_UpperCAmelCase , )
_lowerCAmelCase :Union[str, Any] = vocab_file
_lowerCAmelCase :List[Any] = merges_file
_lowerCAmelCase :Union[str, Any] = {}
_lowerCAmelCase :Optional[int] = 0
_lowerCAmelCase :List[str] = 1
_lowerCAmelCase :Optional[int] = 2
_lowerCAmelCase :str = 3
self.add_from_file(_UpperCAmelCase )
_lowerCAmelCase :str = {v: k for k, v in self.encoder.items()}
with open(_UpperCAmelCase , encoding='utf-8' ) as merges_handle:
_lowerCAmelCase :Tuple = merges_handle.read().split('\n' )[:-1]
_lowerCAmelCase :int = [tuple(merge.split()[:-1] ) for merge in merges]
_lowerCAmelCase :Optional[int] = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
_lowerCAmelCase :Union[str, Any] = {}
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: List[int] , _UpperCAmelCase: Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase :int = [self.cls_token_id]
_lowerCAmelCase :str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: List[int] , _UpperCAmelCase: Optional[List[int]] = None , _UpperCAmelCase: bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1]
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: List[int] , _UpperCAmelCase: Optional[List[int]] = None ):
_lowerCAmelCase :Dict = [self.sep_token_id]
_lowerCAmelCase :Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
return len(self.encoder )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Optional[Any] ):
if token in self.cache:
return self.cache[token]
_lowerCAmelCase :Dict = tuple(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
_lowerCAmelCase :Optional[int] = get_pairs(_UpperCAmelCase )
if not pairs:
return token
while True:
_lowerCAmelCase :List[str] = min(_UpperCAmelCase , key=lambda _UpperCAmelCase : self.bpe_ranks.get(_UpperCAmelCase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase , _lowerCAmelCase :int = bigram
_lowerCAmelCase :Union[str, Any] = []
_lowerCAmelCase :List[str] = 0
while i < len(_UpperCAmelCase ):
try:
_lowerCAmelCase :int = word.index(_UpperCAmelCase , _UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase :Optional[int] = j
if word[i] == first and i < len(_UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase :Union[str, Any] = tuple(_UpperCAmelCase )
_lowerCAmelCase :Tuple = new_word
if len(_UpperCAmelCase ) == 1:
break
else:
_lowerCAmelCase :List[str] = get_pairs(_UpperCAmelCase )
_lowerCAmelCase :Tuple = '@@ '.join(_UpperCAmelCase )
_lowerCAmelCase :Dict = word[:-4]
_lowerCAmelCase :List[str] = word
return word
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Dict ):
_lowerCAmelCase :List[Any] = []
_lowerCAmelCase :Optional[int] = re.findall(r'\S+\n?' , _UpperCAmelCase )
for token in words:
split_tokens.extend(list(self.bpe(_UpperCAmelCase ).split(' ' ) ) )
return split_tokens
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: int ):
return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Union[str, Any] ):
return self.decoder.get(_UpperCAmelCase , self.unk_token )
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: List[str] ):
_lowerCAmelCase :Any = ' '.join(_UpperCAmelCase ).replace('@@ ' , '' ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: str , _UpperCAmelCase: Optional[str] = None ):
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase :int = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase :Union[str, Any] = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.vocab_file , _UpperCAmelCase )
if os.path.abspath(self.merges_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.merges_file , _UpperCAmelCase )
return out_vocab_file, out_merge_file
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Optional[int] ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
try:
with open(_UpperCAmelCase , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(_UpperCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f"""Incorrect encoding detected in {f}, please rebuild the dataset""" )
return
_lowerCAmelCase :str = f.readlines()
for lineTmp in lines:
_lowerCAmelCase :str = lineTmp.strip()
_lowerCAmelCase :Tuple = line.rfind(' ' )
if idx == -1:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt>\'' )
_lowerCAmelCase :Optional[int] = line[:idx]
            _lowerCAmelCase :Dict = len(self.encoder )
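# BPE behaviour worth noting from the implementation above: a token with no
# applicable merges is split into characters joined by the continuation
# marker, e.g. "abc" -> "a@@ b@@ c" (the trailing "</w>" is stripped by the
# `word[:-4]` slice), and the detokenization helper inverts this by deleting
# every "@@ " before stripping whitespace.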
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Optional[int] = 10
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :str = [1, 2, 3, 4]
_lowerCAmelCase :Union[str, Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
_lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
_lowerCAmelCase :Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :List[str] = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
_lowerCAmelCase , _lowerCAmelCase :Optional[Any] = process_story(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [] )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Optional[int] = ''
_lowerCAmelCase , _lowerCAmelCase :str = process_story(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [] )
self.assertEqual(_UpperCAmelCase , [] )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Optional[Any] = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
_lowerCAmelCase , _lowerCAmelCase :Optional[int] = process_story(_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Optional[int] = ['It was the best of times.']
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :Union[str, Any] = torch.tensor([1, 2, 3, 4] )
_lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 0 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :List[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
_lowerCAmelCase :Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 23 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 1 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :List[str] = 101
_lowerCAmelCase :Dict = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
_lowerCAmelCase :int = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_lowerCAmelCase :List[str] = compute_token_type_ids(_UpperCAmelCase , _UpperCAmelCase )
        np.testing.assert_array_equal(_UpperCAmelCase , _UpperCAmelCase )
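# Reading the last expectation: `compute_token_type_ids` starts each row's
# segment id at 1 and flips it at every occurrence of the separator (101),
# with the separator itself taking the new segment's id -- hence
# [1, 101, 3, 4, 101, 6] -> [1, 0, 0, 0, 1, 1].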
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'tf_padding' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'depth_multiplier' ) )
class UpperCAmelCase_ :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=32 , depth_multiplier=0.2_5 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , tf_padding=True , hidden_act="relu6" , last_hidden_size=1280 , classifier_dropout_prob=0.1 , initializer_range=0.0_2 , is_training=True , use_labels=True , num_labels=10 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
def SCREAMING_SNAKE_CASE__ ( self: int ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels, pixel_labels
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ):
        model = MobileNetVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
        result = model(pixel_values , labels=pixel_labels )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': MobileNetVaModel,
'image-classification': MobileNetVaForImageClassification,
'image-segmentation': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase : Optional[Any] = False
lowerCamelCase : Dict = False
lowerCamelCase : Tuple = False
lowerCamelCase : Any = False
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV2 does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
pass
@unittest.skip(reason='MobileNetV2 does not support input and output embeddings' )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
pass
@unittest.skip(reason='MobileNetV2 does not output attentions' )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
pass
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states ) , expected_num_stages )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@slow
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def UpperCamelCase_( ):
"""simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE__ ( self: str ):
return (
MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self: int ):
        model = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1001) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
        model = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
        model = model.to(torch_device )
        image_processor = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
[
[[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]],
[[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, -2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]],
[[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-4 ) ) | 687 |
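# Editor's note: a condensed usage sketch (an assumption mirroring the slow integration
# test above, not part of the original file) of the public MobileNetV2 checkpoint via the
# pipeline API:
from transformers import pipeline

classifier = pipeline('image-classification', model='google/mobilenet_v2_1.0_224')
print(classifier('./tests/fixtures/tests_samples/COCO/000000039769.png')[0])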
def perfect( number : int ):
    """Return True if `number` equals the sum of its proper divisors, e.g. 6 = 1 + 2 + 3."""
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
a = int(input("""Enter number: """).strip())
print(F'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''') | 687 | 1 |
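# Editor's note: quick sanity checks (not in the original script) against the first
# perfect numbers: 6 = 1 + 2 + 3, 28 = 1 + 2 + 4 + 7 + 14, and 496.
assert perfect(6) and perfect(28) and perfect(496)
assert not perfect(12) and not perfect(100)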
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
a = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
print("""Googling.....""")
a = F'''https://www.google.com/search?q={query}&num=100'''
a = requests.get(
url,
headers={"""User-Agent""": str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """yuRUbf"""})
.find("""a""")
.get("""href""")
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """kCrYT"""})
.find("""a""")
.get("""href""")
)["""url"""][0]
webbrowser.open(link) | 687 |
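# Editor's note: a small illustration (my own, not part of the original script) of the
# parse_qs fallback used above. Google's redirect anchors look like /url?q=<target>&sa=...,
# and parse_qs over the query string recovers the target URL:
from urllib.parse import parse_qs, urlparse

redirect = '/url?q=https://example.com/page&sa=U'
print(parse_qs(urlparse(redirect).query)['q'][0])  # https://example.com/page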
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial :
    """A dense polynomial: coefficients[i] is the coefficient of x**i."""
    def __init__( self , degree : int , coefficients : MutableSequence[float] ):
        if len(coefficients ) != degree + 1:
            raise ValueError(
                'The number of coefficients should be equal to the degree + 1.' )
        self.coefficients :list[float] = list(coefficients )
        self.degree = degree
    def __add__( self , polynomial_a : Polynomial ):
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1 ):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree , coefficients )
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1 ):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree , coefficients )
    def __sub__( self , polynomial_a : Polynomial ):
        return self + polynomial_a * Polynomial(0 , [-1] )
    def __neg__( self ):
        return Polynomial(self.degree , [-c for c in self.coefficients] )
    def __mul__( self , polynomial_a : Polynomial ):
        coefficients :list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1 ):
            for j in range(polynomial_a.degree + 1 ):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree , coefficients )
    def evaluate( self , substitution : int | float ):
        result :int | float = 0
        for i in range(self.degree + 1 ):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__( self ):
        polynomial = ''
        for i in range(self.degree , -1 , -1 ):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i] ) )
            elif i == 1:
                polynomial += str(abs(self.coefficients[i] ) ) + "x"
            else:
                polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(i )
        return polynomial
    def __repr__( self ):
        return self.__str__()
    def derivative( self ):
        coefficients :list[float] = [0] * self.degree
        for i in range(self.degree ):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1 , coefficients )
    def integral( self , constant : int | float = 0 ):
        coefficients :list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1 ):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1 , coefficients )
    def __eq__( self , polynomial_a : object ):
        if not isinstance(polynomial_a , Polynomial ):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1 ):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__( self , polynomial_a : object ):
        return not self.__eq__(polynomial_a ) | 687 | 1 |
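# Editor's note: a short usage check (mine, not part of the original file) of the class
# above; coefficients are stored lowest power first, so [1, -3, 2] is 2x^2 - 3x + 1.
p = Polynomial(2, [1, -3, 2])
print(p)               # 2x^2 - 3x + 1
print(p.evaluate(2))   # 3
print(p.derivative())  # 4x - 3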
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
a = TypeVar("""T""")
class UpperCAmelCase_ (Generic[T] ):
"""simple docstring"""
    def __init__( self , directed : bool = True ):
        self.adj_list :dict[T, list[T]] = {} # dictionary of lists
        self.directed = directed
    def add_edge( self , source_vertex : T , destination_vertex : T ):
        if not self.directed: # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else: # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self
def __repr__( self: List[Any] ):
return pformat(self.adj_list ) | 687 |
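# Editor's note: a minimal usage sketch of the adjacency-list graph above, written against
# the class exactly as it appears in this file (add_edge chains because it returns self):
g = UpperCAmelCase_(directed=False)
g.add_edge(1, 2).add_edge(2, 3)
print(g)  # {1: [2], 2: [1, 3], 3: [2]}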
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 | 1 |
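# Editor's note: an illustration (assumption, standard transformers behaviour) of what the
# _LazyModule indirection above buys: importing a name from the package only triggers the
# heavy torch/flax submodule import at attribute-access time, e.g.
#
#   from transformers import GPTNeoConfig          # cheap: config module only
#   from transformers import GPTNeoForCausalLM     # loads modeling_gpt_neo lazily here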
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs :
"""simple docstring"""
    def forward( self , input_ids , token_type_ids , attention_mask ):
return None
class FuncNonContiguousArgs :
"""simple docstring"""
    def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ):
return None
class OnnxExportTestCase (unittest.TestCase ):
"""simple docstring"""
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def SCREAMING_SNAKE_CASE__ ( self: int ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , 'tf' , 12 , **model_kwargs )
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self: str ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , 'pt' , 12 , **model_kwargs )
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
from transformers import BertModel
_lowerCAmelCase :Optional[int] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(_UpperCAmelCase ) )
vocab_file.flush()
_lowerCAmelCase :int = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
_lowerCAmelCase :Optional[Any] = BertModel(BertConfig(vocab_size=len(_UpperCAmelCase ) ) )
model.save_pretrained(_UpperCAmelCase )
self._test_export(_UpperCAmelCase , 'pt' , 12 , _UpperCAmelCase )
@require_tf
@slow
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , 'tf' , 12 , **model_kwargs )
            quantized_path = quantize(Path(path ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , 'pt' , 12 , **model_kwargs )
            quantized_path = quantize(path )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
    def _test_export( self , model , framework , opset , tokenizer=None , **model_kwargs ):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir ).joinpath('model.onnx' )
                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()
                # Export
                convert(framework , model , path , opset , tokenizer , **model_kwargs )
                return path
        except Exception as e:
            self.fail(e )
@require_torch
@require_tokenizers
@slow
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
from transformers import BertModel
_lowerCAmelCase :List[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
_lowerCAmelCase :str = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(_UpperCAmelCase , _UpperCAmelCase , 'pt' )
@require_tf
@require_tokenizers
@slow
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
from transformers import TFBertModel
_lowerCAmelCase :int = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
_lowerCAmelCase :Dict = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(_UpperCAmelCase , _UpperCAmelCase , 'tf' )
    def _test_infer_dynamic_axis( self , model , tokenizer , framework ):
        nlp = FeatureExtractionPipeline(model , tokenizer )
        variable_names = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
        input_vars , output_vars , shapes , tokens = infer_shapes(nlp , framework )
        # Assert all variables are present
        self.assertEqual(len(shapes ) , len(variable_names ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , input_vars )
        self.assertSequenceEqual(variable_names[3:] , output_vars )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] , {0: 'batch'} )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
        input_names = ['input_ids', 'attention_mask', 'token_type_ids']
        tokens = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
        ordered_input_names , inputs_args = ensure_valid_input(FuncContiguousArgs() , tokens , input_names )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args ) , 3 )
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names ) , set(input_names ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args , (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names , inputs_args = ensure_valid_input(FuncNonContiguousArgs() , tokens , input_names )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(ordered_input_names ) , 1 )
        self.assertEqual(len(inputs_args ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] , 'input_ids' )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
        generated = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) , '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' , generated.as_posix() ) | 687 |
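# Editor's note: a hedged usage sketch (mine) of the export entry point the tests above
# drive; argument order follows transformers.convert_graph_to_onnx.convert, and the model
# name and output path are placeholders.
from pathlib import Path
from transformers.convert_graph_to_onnx import convert

convert(framework='pt', model='bert-base-cased', output=Path('onnx/bert-base-cased.onnx'), opset=12)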
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson( func : str , a : float | Decimal , precision : float = 10**-10 ):
    """Find a root of `func`, an expression written in terms of the variable `x`."""
    x = a
    while True:
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) ) # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision: # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
    # Find root of logarithmic function (the root of log(x) - 1 = 0 is e)
print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''') | 687 | 1 |
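# Editor's note: an eval-free variant (my own sketch, not part of the original) of the same
# Newton-Raphson iteration, taking the function and its derivative as plain callables:
from math import cos, sin

def newton_raphson_callable(f, df, x0, precision=1e-10):
    x = x0
    while abs(f(x)) >= precision:
        x = x - f(x) / df(x)  # x_{n+1} = x_n - f(x_n) / f'(x_n)
    return x

print(newton_raphson_callable(sin, cos, 2.0))  # ~= pi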
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    """simple docstring"""
    smp_options = os.getenv('SM_HP_MP_PARAMETERS' , '{}' )
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options )
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv('SM_FRAMEWORK_PARAMS' , '{}' )
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options )
        if not mpi_options.get('sagemaker_mpi_enabled' , False ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec('smdistributed' ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments (TrainingArguments ):
"""simple docstring"""
    mp_parameters : str = field(
        default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
super().__post_init__()
warnings.warn(
'`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '
'`TrainingArguments` instead.' , _UpperCAmelCase , )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self: str ):
logger.info('PyTorch: setting up devices' )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
'torch.distributed process group is initialized, but local_rank == -1. '
'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch' )
if self.no_cuda:
            device = torch.device('cpu' )
            self._n_gpu = 0
elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device('cuda' , local_rank )
            self._n_gpu = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend='smddp' , timeout=self.ddp_timeout_delta )
            self.local_rank = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK' ) )
            device = torch.device('cuda' , self.local_rank )
            self._n_gpu = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
            device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
            self._n_gpu = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend='nccl' , timeout=self.ddp_timeout_delta )
            device = torch.device('cuda' , self.local_rank )
            self._n_gpu = 1
if device.type == "cuda":
            torch.cuda.set_device(device )
return device
@property
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
return not is_sagemaker_model_parallel_available()
@property
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
return False | 687 |
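# Editor's note: illustrative shapes (assumptions inferred from the parsing logic above) of
# the two environment variables the availability check inspects:
#
#   SM_HP_MP_PARAMETERS='{"partitions": 2, "microbatches": 4}'   # must contain "partitions"
#   SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}'        # must enable sagemaker MPI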
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
a = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""sample_size""": 256,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
a = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
a = {
"""num_train_timesteps""": 151,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
def strabool( v ):
    """simple docstring"""
    if isinstance(v , bool ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
def convert_resnet( checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=False ):
"""simple docstring"""
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.in_layers.0.weight"""]
_lowerCAmelCase :Union[str, Any] = checkpoint[f"""{old_prefix}.in_layers.0.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.in_layers.2.weight"""]
_lowerCAmelCase :Optional[Any] = checkpoint[f"""{old_prefix}.in_layers.2.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""]
_lowerCAmelCase :Any = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.out_layers.0.weight"""]
_lowerCAmelCase :List[Any] = checkpoint[f"""{old_prefix}.out_layers.0.bias"""]
_lowerCAmelCase :Optional[int] = checkpoint[f"""{old_prefix}.out_layers.3.weight"""]
_lowerCAmelCase :Dict = checkpoint[f"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
_lowerCAmelCase :List[Any] = checkpoint[f"""{old_prefix}.skip_connection.weight"""]
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def convert_attention( checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim=None ):
"""simple docstring"""
    weight_q , weight_k , weight_v = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
    bias_q , bias_k , bias_v = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.norm.weight"""]
_lowerCAmelCase :Dict = checkpoint[f"""{old_prefix}.norm.bias"""]
_lowerCAmelCase :Dict = weight_q.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :str = bias_q.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :List[str] = weight_k.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :Optional[Any] = bias_k.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :Tuple = weight_v.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :List[Any] = bias_v.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :int = (
checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
_lowerCAmelCase :Optional[Any] = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def con_pt_to_diffuser( unet_path : str , unet_config : dict ):
    """simple docstring"""
    checkpoint = torch.load(unet_path , map_location='cpu' )
    new_checkpoint = {}
_lowerCAmelCase :List[str] = checkpoint['time_embed.0.weight']
_lowerCAmelCase :Tuple = checkpoint['time_embed.0.bias']
_lowerCAmelCase :Dict = checkpoint['time_embed.2.weight']
_lowerCAmelCase :Union[str, Any] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
_lowerCAmelCase :Union[str, Any] = checkpoint['label_emb.weight']
_lowerCAmelCase :str = checkpoint['input_blocks.0.0.weight']
_lowerCAmelCase :str = checkpoint['input_blocks.0.0.bias']
    down_block_types = unet_config['down_block_types']
    layers_per_block = unet_config['layers_per_block']
    attention_head_dim = unet_config['attention_head_dim']
    channels_list = unet_config['block_out_channels']
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types ):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block ):
                new_prefix = f"""down_blocks.{i}.resnets.{j}"""
                old_prefix = f"""input_blocks.{current_layer}.0"""
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block ):
                new_prefix = f"""down_blocks.{i}.resnets.{j}"""
                old_prefix = f"""input_blocks.{current_layer}.0"""
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
                new_prefix = f"""down_blocks.{i}.attentions.{j}"""
                old_prefix = f"""input_blocks.{current_layer}.1"""
                new_checkpoint = convert_attention(
                    checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
current_layer += 1
        if i != len(down_block_types ) - 1:
            new_prefix = f"""down_blocks.{i}.downsamplers.0"""
            old_prefix = f"""input_blocks.{current_layer}.0"""
            new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
            current_layer += 1
        prev_channels = current_channels
# hardcoded the mid-block for now
    new_prefix = 'mid_block.resnets.0'
    old_prefix = 'middle_block.0'
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    new_prefix = 'mid_block.attentions.0'
    old_prefix = 'middle_block.1'
    new_checkpoint = convert_attention(checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
    new_prefix = 'mid_block.resnets.1'
    old_prefix = 'middle_block.2'
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    current_layer = 0
    up_block_types = unet_config['up_block_types']
    for i, layer_type in enumerate(up_block_types ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCAmelCase :Optional[Any] = f"""up_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :Dict = f"""output_blocks.{current_layer}.0"""
_lowerCAmelCase :Any = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Any = f"""up_blocks.{i}.upsamplers.0"""
_lowerCAmelCase :Dict = f"""output_blocks.{current_layer-1}.1"""
_lowerCAmelCase :Tuple = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCAmelCase :Tuple = f"""up_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :List[str] = f"""output_blocks.{current_layer}.0"""
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
_lowerCAmelCase :str = f"""up_blocks.{i}.attentions.{j}"""
_lowerCAmelCase :List[Any] = f"""output_blocks.{current_layer}.1"""
_lowerCAmelCase :int = convert_attention(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Optional[int] = f"""up_blocks.{i}.upsamplers.0"""
_lowerCAmelCase :int = f"""output_blocks.{current_layer-1}.2"""
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :str = checkpoint['out.0.weight']
_lowerCAmelCase :Union[str, Any] = checkpoint['out.0.bias']
_lowerCAmelCase :List[Any] = checkpoint['out.2.weight']
_lowerCAmelCase :Dict = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
a = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
a = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
a = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
a = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
a = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
a = CMStochasticIterativeScheduler(**scheduler_config)
a = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path) | 687 | 1 |
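# Editor's note: an example invocation (file name and paths are placeholders, not from the
# original script) of the conversion entry point above:
#
#   python convert_consistency_model.py \
#       --unet_path ./cd_imagenet64_l2.pt \
#       --dump_path ./dump/consistency-model-imagenet64 \
#       --class_cond True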
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} )
lowerCamelCase : Optional[str] = field(
default='./' , metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for training.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for evaluation.'} )
lowerCamelCase : Optional[float] = field(default=0.1 , metadata={'help': 'Value of weight decay.'} )
lowerCamelCase : Optional[int] = field(
default=1_00_00 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
lowerCamelCase : Optional[float] = field(default=2e-4 , metadata={'help': 'Learning rate fo training.'} )
lowerCamelCase : Optional[str] = field(default='cosine' , metadata={'help': 'Learning rate.'} )
lowerCamelCase : Optional[int] = field(
default=7_50 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
lowerCamelCase : Optional[int] = field(
default=16 , metadata={'help': 'Number of gradient accumulation steps.'} )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
lowerCamelCase : Optional[int] = field(default=5_00_00 , metadata={'help': 'Maximum number of training steps.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Sequence lengths used for training.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Training seed.'} )
lowerCamelCase : Optional[int] = field(
default=10_24 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} , )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Length of sequences to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for code evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Sample from the language model\'s output distribution.'} )
lowerCamelCase : Optional[float] = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} )
lowerCamelCase : Optional[int] = field(default=2_56 , metadata={'help': 'Maximum number of newly generated tokens.'} )
lowerCamelCase : Optional[int] = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} )
lowerCamelCase : Optional[float] = field(default=0.95 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
lowerCamelCase : Optional[int] = field(default=10 , metadata={'help': 'Number of generations to run in parallel.'} )
lowerCamelCase : Optional[int] = field(
default=2_00 , metadata={'help': 'Number of completions to generate for each sample.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
lowerCamelCase : Optional[str] = field(
default='eval_results.json' , metadata={'help': 'Random seed used for evaluation.'} )
lowerCamelCase : Optional[str] = field(
default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} , )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} , )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot-clean' , metadata={'help': 'Folder to save processed processed dataset.'} )
lowerCamelCase : Optional[int] = field(
default=10_00_00 , metadata={'help': 'Number of files to save per JSON output file.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[float] = field(
default=10_00 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1_00 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.25 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1.5 , metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'If True, near-duplicate samples are removed.'} )
lowerCamelCase : Optional[float] = field(
default=0.85 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[int] = field(default=20_00_00 , metadata={'help': 'Number of examples to train tokenizer on.'} )
lowerCamelCase : Optional[int] = field(
default=3_27_68 , metadata={'help': 'Number of examples to train the tokenizer on.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} )
lowerCamelCase : Optional[str] = field(
default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} )
lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for code evaluation.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} ) | 687 |
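# Editor's note: a hedged sketch (mine; HfArgumentParser is real transformers API, but the
# class reference is illustrative, since the dataclasses above all share one obfuscated name):
#
#   from transformers import HfArgumentParser
#   parser = HfArgumentParser(TrainingArguments)       # one of the dataclasses above
#   args = parser.parse_args_into_dataclasses()[0]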
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
a = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
        self.diffusers_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
shutil.copy(
os.path.join(_UpperCAmelCase , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
        check_copies.DIFFUSERS_PATH = 'src/diffusers'
shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency( self , comment , class_name , class_code , overwrite_result=None ):
        code = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=119 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.diffusers_dir , 'new_code.py' )
        with open(fname , 'w' , newline='\n' ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , 'r' ) as f:
                self.assertTrue(f.read() , expected )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
        code = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
        self.assertEqual(code , REFERENCE_CODE )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
# Base copy consistency
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , _UpperCAmelCase , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , _UpperCAmelCase ) , )
# Copy consistency with a really long name
_lowerCAmelCase :Optional[int] = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , _UpperCAmelCase , _UpperCAmelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , _UpperCAmelCase , overwrite_result=re.sub('DDPM' , 'Test' , _UpperCAmelCase ) , ) | 687 | 1 |
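# A minimal sketch of the idea behind the '# Copied from ... with A->B' consistency
# check exercised by the test above: apply the rename mapping to the reference
# snippet and require that the target matches. `reference`/`target` and the mapping
# are made-up inputs for illustration, not the real check_copies implementation.
import re

def is_copy_consistent_sketch(reference: str, target: str, old: str, new: str) -> bool:
    return re.sub(old, new, reference) == target

assert is_copy_consistent_sketch('class DDPMOutput:', 'class TestOutput:', 'DDPM', 'Test')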
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
a = logging.get_logger(__name__)
a = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all MVP models at https://huggingface.co/models?filter=mvp
a = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
a = {
"""RUCAIBox/mvp""": 1_024,
}
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : str = VOCAB_FILES_NAMES
lowerCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Tuple = ['input_ids', 'attention_mask']
lowerCamelCase : List[Any] = MvpTokenizer
def __init__( self: List[Any] , _UpperCAmelCase: Tuple=None , _UpperCAmelCase: List[str]=None , _UpperCAmelCase: List[str]=None , _UpperCAmelCase: Dict="replace" , _UpperCAmelCase: List[Any]="<s>" , _UpperCAmelCase: Tuple="</s>" , _UpperCAmelCase: Any="</s>" , _UpperCAmelCase: Dict="<s>" , _UpperCAmelCase: Any="<unk>" , _UpperCAmelCase: Tuple="<pad>" , _UpperCAmelCase: Any="<mask>" , _UpperCAmelCase: Optional[int]=False , _UpperCAmelCase: List[Any]=True , **_UpperCAmelCase: Any , ):
super().__init__(
_UpperCAmelCase , _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase , **_UpperCAmelCase , )
_lowerCAmelCase :Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , _UpperCAmelCase ) != add_prefix_space:
_lowerCAmelCase :Tuple = getattr(_UpperCAmelCase , pre_tok_state.pop('type' ) )
_lowerCAmelCase :int = add_prefix_space
_lowerCAmelCase :str = pre_tok_class(**_UpperCAmelCase )
_lowerCAmelCase :str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCAmelCase :List[str] = 'post_processor'
_lowerCAmelCase :Union[str, Any] = getattr(self.backend_tokenizer , _UpperCAmelCase , _UpperCAmelCase )
if tokenizer_component_instance:
_lowerCAmelCase :Dict = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCAmelCase :Dict = tuple(state['sep'] )
if "cls" in state:
_lowerCAmelCase :Tuple = tuple(state['cls'] )
_lowerCAmelCase :Tuple = False
if state.get('add_prefix_space' , _UpperCAmelCase ) != add_prefix_space:
_lowerCAmelCase :List[Any] = add_prefix_space
_lowerCAmelCase :List[str] = True
if state.get('trim_offsets' , _UpperCAmelCase ) != trim_offsets:
_lowerCAmelCase :str = trim_offsets
_lowerCAmelCase :List[Any] = True
if changes_to_apply:
_lowerCAmelCase :str = getattr(_UpperCAmelCase , state.pop('type' ) )
_lowerCAmelCase :Any = component_class(**_UpperCAmelCase )
setattr(self.backend_tokenizer , _UpperCAmelCase , _UpperCAmelCase )
@property
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Optional[int] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else value
_lowerCAmelCase :List[Any] = value
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] , *_UpperCAmelCase: Optional[int] , **_UpperCAmelCase: List[Any] ):
_lowerCAmelCase :Tuple = kwargs.get('is_split_into_words' , _UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*_UpperCAmelCase , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int , *_UpperCAmelCase: Optional[Any] , **_UpperCAmelCase: Any ):
_lowerCAmelCase :str = kwargs.get('is_split_into_words' , _UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*_UpperCAmelCase , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: str , _UpperCAmelCase: Optional[str] = None ):
_lowerCAmelCase :Tuple = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: str , _UpperCAmelCase: Any=None ):
_lowerCAmelCase :Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] , _UpperCAmelCase: List[int] , _UpperCAmelCase: Optional[List[int]] = None ):
_lowerCAmelCase :List[Any] = [self.sep_token_id]
_lowerCAmelCase :List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 687 |
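# A small illustration of the special-token layout that build_inputs_with_special_tokens
# above produces (MVP follows the BART/RoBERTa scheme). The token ids 10/11/20/21 are
# made up; only the positions of <s>=0 and </s>=2 matter here.
bos, eos = 0, 2
seq_a, seq_b = [10, 11], [20, 21]
single = [bos] + seq_a + [eos]            # <s> A </s>
pair = single + [eos] + seq_b + [eos]     # <s> A </s></s> B </s>
assert single == [0, 10, 11, 2]
assert pair == [0, 10, 11, 2, 2, 20, 21, 2]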
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} )
lowerCamelCase : Optional[str] = field(
        default='./' , metadata={'help': 'Save dir where the model repo is cloned and model updates are saved to.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for training.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for evaluation.'} )
lowerCamelCase : Optional[float] = field(default=0.1 , metadata={'help': 'Value of weight decay.'} )
lowerCamelCase : Optional[int] = field(
default=1_00_00 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
    lowerCamelCase : Optional[float] = field(default=2e-4 , metadata={'help': 'Learning rate for training.'} )
    lowerCamelCase : Optional[str] = field(default='cosine' , metadata={'help': 'Learning rate scheduler type.'} )
lowerCamelCase : Optional[int] = field(
default=7_50 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
lowerCamelCase : Optional[int] = field(
default=16 , metadata={'help': 'Number of gradient accumulation steps.'} )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
lowerCamelCase : Optional[int] = field(default=5_00_00 , metadata={'help': 'Maximum number of training steps.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Sequence lengths used for training.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Training seed.'} )
lowerCamelCase : Optional[int] = field(
        default=10_24 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes, not training steps.'} , )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Length of sequences to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for code evaluation.'} )
lowerCamelCase : Optional[int] = field(
        default=snake_case__ , metadata={'help': 'The number of human-eval tasks to run. If not included, all tasks are evaluated.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Sample from the language model\'s output distribution.'} )
lowerCamelCase : Optional[float] = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} )
lowerCamelCase : Optional[int] = field(default=2_56 , metadata={'help': 'Maximum number of newly generated tokens.'} )
lowerCamelCase : Optional[int] = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} )
lowerCamelCase : Optional[float] = field(default=0.95 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
lowerCamelCase : Optional[int] = field(default=10 , metadata={'help': 'Number of generations to run in parallel.'} )
lowerCamelCase : Optional[int] = field(
default=2_00 , metadata={'help': 'Number of completions to generate for each sample.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
lowerCamelCase : Optional[str] = field(
        default='eval_results.json' , metadata={'help': 'Name of the output file for the evaluation results.'} )
lowerCamelCase : Optional[str] = field(
default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} , )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} , )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} )
lowerCamelCase : Optional[str] = field(
        default='codeparrot-clean' , metadata={'help': 'Folder to save the processed dataset.'} )
lowerCamelCase : Optional[int] = field(
default=10_00_00 , metadata={'help': 'Number of files to save per JSON output file.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[float] = field(
default=10_00 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1_00 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.25 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1.5 , metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'If True, near-duplicate samples are removed.'} )
lowerCamelCase : Optional[float] = field(
default=0.85 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[int] = field(default=20_00_00 , metadata={'help': 'Number of examples to train tokenizer on.'} )
lowerCamelCase : Optional[int] = field(
        default=3_27_68 , metadata={'help': 'Vocabulary size of the new tokenizer.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} )
lowerCamelCase : Optional[str] = field(
default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} )
    lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for pretokenization.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} ) | 687 | 1 |
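# A hedged sketch of how argument dataclasses like the ones above are usually
# consumed, via transformers.HfArgumentParser. The dataclass below is a made-up
# stand-in, since the class names in this dump are mangled.
from dataclasses import dataclass, field
from typing import Optional
from transformers import HfArgumentParser

@dataclass
class SketchTrainingArguments:
    model_ckpt: Optional[str] = field(default='codeparrot/codeparrot', metadata={'help': 'Model name or path.'})
    learning_rate: Optional[float] = field(default=2e-4, metadata={'help': 'Learning rate for training.'})

parser = HfArgumentParser(SketchTrainingArguments)
# parse_args_into_dataclasses accepts an explicit argv list, handy for testing:
(sketch_args,) = parser.parse_args_into_dataclasses(args=['--learning_rate', '1e-4'])
assert sketch_args.learning_rate == 1e-4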
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
    if not isinstance(__magic_name__ , int ):
        raise TypeError('Input value must be an \'int\' type' )
    elif __magic_name__ < 0:
        raise ValueError('Input value must be a positive integer' )
return bin(__magic_name__ ).count('1' )
if __name__ == "__main__":
import doctest
doctest.testmod() | 687 |
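# An alternative set-bit counter (Brian Kernighan's trick) for comparison with the
# bin(...).count('1') approach above: n & (n - 1) clears the lowest set bit, so the
# loop body runs once per set bit.
def count_set_bits_kernighan(n: int) -> int:
    if not isinstance(n, int):
        raise TypeError('Input value must be an \'int\' type')
    if n < 0:
        raise ValueError('Input value must be a positive integer')
    count = 0
    while n:
        n &= n - 1
        count += 1
    return count

assert count_set_bits_kernighan(25) == bin(25).count('1')  # 25 = 0b11001 -> 3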
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :List[str] = 'ylacombe/bark-small'
_lowerCAmelCase :int = tempfile.mkdtemp()
_lowerCAmelCase :List[str] = 'en_speaker_1'
_lowerCAmelCase :Union[str, Any] = 'This is a test string'
_lowerCAmelCase :List[Any] = 'speaker_embeddings_path.json'
_lowerCAmelCase :str = 'speaker_embeddings'
def SCREAMING_SNAKE_CASE__ ( self: str , **_UpperCAmelCase: Optional[Any] ):
return AutoTokenizer.from_pretrained(self.checkpoint , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :List[Any] = self.get_tokenizer()
_lowerCAmelCase :List[str] = BarkProcessor(tokenizer=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase :List[str] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_lowerCAmelCase :Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_lowerCAmelCase :Any = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_lowerCAmelCase :List[Any] = 35
_lowerCAmelCase :Optional[int] = 2
_lowerCAmelCase :Dict = 8
_lowerCAmelCase :Dict = {
'semantic_prompt': np.ones(_UpperCAmelCase ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_lowerCAmelCase :int = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_lowerCAmelCase :Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Tuple = self.get_tokenizer()
_lowerCAmelCase :Union[str, Any] = BarkProcessor(tokenizer=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = processor(text=self.input_string )
_lowerCAmelCase :List[str] = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() ) | 687 | 1 |
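# A minimal sketch of the voice-preset round trip the test above relies on: Bark
# presets are plain dicts of numpy arrays, so np.savez / np.load preserve them.
# The shapes (35, (2, 35), (8, 35)) mirror the test and are illustrative only.
import numpy as np

preset = {
    'semantic_prompt': np.ones(35),
    'coarse_prompt': np.ones((2, 35)),
    'fine_prompt': np.ones((8, 35)),
}
np.savez('voice_preset_sketch.npz', **preset)
loaded = np.load('voice_preset_sketch.npz')
for key in preset:
    assert (loaded[key] == preset[key]).all()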
from typing import Dict, Optional
import numpy as np
import datasets
a = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
a = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
a = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : bool , __magic_name__ : Optional[Dict[int, int]] = None , __magic_name__ : bool = False , ):
"""simple docstring"""
if label_map is not None:
for old_id, new_id in label_map.items():
_lowerCAmelCase :Dict = new_id
# turn into Numpy arrays
_lowerCAmelCase :Tuple = np.array(__magic_name__ )
_lowerCAmelCase :List[str] = np.array(__magic_name__ )
if reduce_labels:
_lowerCAmelCase :List[Any] = 255
_lowerCAmelCase :Optional[Any] = label - 1
_lowerCAmelCase :Union[str, Any] = 255
_lowerCAmelCase :str = label != ignore_index
_lowerCAmelCase :List[str] = np.not_equal(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Tuple = pred_label[mask]
_lowerCAmelCase :Dict = np.array(__magic_name__ )[mask]
_lowerCAmelCase :List[Any] = pred_label[pred_label == label]
_lowerCAmelCase :Dict = np.histogram(__magic_name__ , bins=__magic_name__ , range=(0, num_labels - 1) )[0]
_lowerCAmelCase :Optional[int] = np.histogram(__magic_name__ , bins=__magic_name__ , range=(0, num_labels - 1) )[0]
_lowerCAmelCase :Any = np.histogram(__magic_name__ , bins=__magic_name__ , range=(0, num_labels - 1) )[0]
_lowerCAmelCase :List[Any] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def UpperCamelCase_( __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : bool , __magic_name__ : Optional[Dict[int, int]] = None , __magic_name__ : bool = False , ):
"""simple docstring"""
_lowerCAmelCase :Any = np.zeros((num_labels,) , dtype=np.floataa )
_lowerCAmelCase :List[str] = np.zeros((num_labels,) , dtype=np.floataa )
_lowerCAmelCase :Union[str, Any] = np.zeros((num_labels,) , dtype=np.floataa )
_lowerCAmelCase :Tuple = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(__magic_name__ , __magic_name__ ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Optional[Any] = intersect_and_union(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Any , __magic_name__ : str , __magic_name__ : bool , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[Dict[int, int]] = None , __magic_name__ : bool = False , ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Optional[int] = total_intersect_and_union(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# compute metrics
_lowerCAmelCase :Optional[int] = {}
_lowerCAmelCase :List[str] = total_area_intersect.sum() / total_area_label.sum()
_lowerCAmelCase :Dict = total_area_intersect / total_area_union
_lowerCAmelCase :List[str] = total_area_intersect / total_area_label
_lowerCAmelCase :Optional[Any] = np.nanmean(__magic_name__ )
_lowerCAmelCase :List[str] = np.nanmean(__magic_name__ )
_lowerCAmelCase :Optional[Any] = all_acc
_lowerCAmelCase :str = iou
_lowerCAmelCase :str = acc
if nan_to_num is not None:
_lowerCAmelCase :Optional[int] = {metric: np.nan_to_num(__magic_name__ , nan=__magic_name__ ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ (datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Dict , _UpperCAmelCase: Dict , _UpperCAmelCase: int , _UpperCAmelCase: bool , _UpperCAmelCase: Optional[int] = None , _UpperCAmelCase: Optional[Dict[int, int]] = None , _UpperCAmelCase: bool = False , ):
_lowerCAmelCase :Union[str, Any] = mean_iou(
results=_UpperCAmelCase , gt_seg_maps=_UpperCAmelCase , num_labels=_UpperCAmelCase , ignore_index=_UpperCAmelCase , nan_to_num=_UpperCAmelCase , label_map=_UpperCAmelCase , reduce_labels=_UpperCAmelCase , )
return iou_result | 687 |
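# A worked toy example of the histogram trick used by intersect_and_union above:
# per-class counts of the correctly predicted pixels, the predictions, and the
# labels give intersection and union without looping over classes.
import numpy as np

num_labels = 3
pred = np.array([[0, 1], [1, 2]])
gt = np.array([[0, 1], [2, 2]])
intersect = pred[pred == gt]  # pixels whose predicted class matches the label
area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
area_pred = np.histogram(pred, bins=num_labels, range=(0, num_labels - 1))[0]
area_gt = np.histogram(gt, bins=num_labels, range=(0, num_labels - 1))[0]
iou = area_intersect / (area_pred + area_gt - area_intersect)
assert np.allclose(iou, [1.0, 0.5, 0.5])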
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : int = 'bert'
def __init__( self: Optional[Any] , _UpperCAmelCase: Tuple=3_0522 , _UpperCAmelCase: int=768 , _UpperCAmelCase: Union[str, Any]=12 , _UpperCAmelCase: Dict=12 , _UpperCAmelCase: List[Any]=3072 , _UpperCAmelCase: List[Any]="gelu" , _UpperCAmelCase: Union[str, Any]=0.1 , _UpperCAmelCase: Dict=0.1 , _UpperCAmelCase: List[Any]=512 , _UpperCAmelCase: Optional[Any]=2 , _UpperCAmelCase: Optional[int]=0.0_2 , _UpperCAmelCase: Any=1e-1_2 , _UpperCAmelCase: Optional[Any]=0 , _UpperCAmelCase: Union[str, Any]="absolute" , _UpperCAmelCase: Dict=True , _UpperCAmelCase: Optional[Any]=None , **_UpperCAmelCase: Optional[int] , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :List[Any] = vocab_size
_lowerCAmelCase :Tuple = hidden_size
_lowerCAmelCase :Dict = num_hidden_layers
_lowerCAmelCase :Optional[Any] = num_attention_heads
_lowerCAmelCase :List[Any] = hidden_act
_lowerCAmelCase :int = intermediate_size
_lowerCAmelCase :Tuple = hidden_dropout_prob
_lowerCAmelCase :Tuple = attention_probs_dropout_prob
_lowerCAmelCase :List[Any] = max_position_embeddings
_lowerCAmelCase :Dict = type_vocab_size
_lowerCAmelCase :Any = initializer_range
_lowerCAmelCase :int = layer_norm_eps
_lowerCAmelCase :List[Any] = position_embedding_type
_lowerCAmelCase :int = use_cache
_lowerCAmelCase :Union[str, Any] = classifier_dropout
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
if self.task == "multiple-choice":
_lowerCAmelCase :List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase :Any = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] ) | 687 | 1 |
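# A hedged sketch of where a dynamic-axes mapping like the OrderedDict above ends
# up: the `dynamic_axes` argument of torch.onnx.export. A tiny embedding module
# stands in for a real BERT checkpoint to keep the example self-contained.
import torch
from torch import nn

tiny_model = nn.Embedding(100, 16)
dummy_input_ids = torch.randint(0, 100, (2, 8))
torch.onnx.export(
    tiny_model,
    (dummy_input_ids,),
    'tiny_model.onnx',
    input_names=['input_ids'],
    output_names=['last_hidden_state'],
    dynamic_axes={
        'input_ids': {0: 'batch', 1: 'sequence'},
        'last_hidden_state': {0: 'batch', 1: 'sequence'},
    },
)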
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
a = logging.get_logger(__name__)
a = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : List[str] = 'layoutlmv3'
def __init__( self: Any , _UpperCAmelCase: Any=5_0265 , _UpperCAmelCase: Tuple=768 , _UpperCAmelCase: Optional[int]=12 , _UpperCAmelCase: str=12 , _UpperCAmelCase: int=3072 , _UpperCAmelCase: Tuple="gelu" , _UpperCAmelCase: Dict=0.1 , _UpperCAmelCase: Optional[int]=0.1 , _UpperCAmelCase: Optional[Any]=512 , _UpperCAmelCase: Union[str, Any]=2 , _UpperCAmelCase: Union[str, Any]=0.0_2 , _UpperCAmelCase: Tuple=1e-5 , _UpperCAmelCase: Dict=1 , _UpperCAmelCase: Dict=0 , _UpperCAmelCase: List[Any]=2 , _UpperCAmelCase: Dict=1024 , _UpperCAmelCase: List[str]=128 , _UpperCAmelCase: List[Any]=128 , _UpperCAmelCase: Union[str, Any]=True , _UpperCAmelCase: Any=32 , _UpperCAmelCase: str=128 , _UpperCAmelCase: Optional[Any]=64 , _UpperCAmelCase: Optional[int]=256 , _UpperCAmelCase: Any=True , _UpperCAmelCase: int=True , _UpperCAmelCase: Tuple=True , _UpperCAmelCase: List[str]=224 , _UpperCAmelCase: str=3 , _UpperCAmelCase: List[Any]=16 , _UpperCAmelCase: int=None , **_UpperCAmelCase: Tuple , ):
super().__init__(
vocab_size=_UpperCAmelCase , hidden_size=_UpperCAmelCase , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , intermediate_size=_UpperCAmelCase , hidden_act=_UpperCAmelCase , hidden_dropout_prob=_UpperCAmelCase , attention_probs_dropout_prob=_UpperCAmelCase , max_position_embeddings=_UpperCAmelCase , type_vocab_size=_UpperCAmelCase , initializer_range=_UpperCAmelCase , layer_norm_eps=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
_lowerCAmelCase :List[str] = max_ad_position_embeddings
_lowerCAmelCase :Dict = coordinate_size
_lowerCAmelCase :Tuple = shape_size
_lowerCAmelCase :List[Any] = has_relative_attention_bias
_lowerCAmelCase :str = rel_pos_bins
_lowerCAmelCase :Union[str, Any] = max_rel_pos
_lowerCAmelCase :Union[str, Any] = has_spatial_attention_bias
_lowerCAmelCase :List[Any] = rel_ad_pos_bins
_lowerCAmelCase :Optional[Any] = max_rel_ad_pos
_lowerCAmelCase :Any = text_embed
_lowerCAmelCase :Tuple = visual_embed
_lowerCAmelCase :List[Any] = input_size
_lowerCAmelCase :Dict = num_channels
_lowerCAmelCase :Optional[int] = patch_size
_lowerCAmelCase :Dict = classifier_dropout
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : List[str] = version.parse('1.12' )
@property
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self: str ):
return 1e-5
@property
def SCREAMING_SNAKE_CASE__ ( self: str ):
return 12
def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: "ProcessorMixin" , _UpperCAmelCase: int = -1 , _UpperCAmelCase: int = -1 , _UpperCAmelCase: bool = False , _UpperCAmelCase: Optional["TensorType"] = None , _UpperCAmelCase: int = 3 , _UpperCAmelCase: int = 40 , _UpperCAmelCase: int = 40 , ):
setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowerCAmelCase :Optional[Any] = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowerCAmelCase :Optional[int] = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase )
_lowerCAmelCase :Any = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
_lowerCAmelCase :Optional[Any] = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
_lowerCAmelCase :Optional[int] = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
_lowerCAmelCase :Union[str, Any] = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Dict = dict(
processor(
_UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) )
return inputs | 687 |
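# A hedged re-implementation of what compute_effective_axis_dimension (imported
# above) is expected to do: replace a dynamic axis (-1) with a fixed default, then
# make room for the special tokens that will be added. This is a sketch of the
# observed behaviour, not the library source.
def effective_axis_dimension_sketch(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    if dimension <= 0:
        dimension = fixed_dimension
    return dimension - num_token_to_add

assert effective_axis_dimension_sketch(-1, fixed_dimension=8, num_token_to_add=2) == 6
assert effective_axis_dimension_sketch(16, fixed_dimension=8, num_token_to_add=2) == 14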
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ):
"""simple docstring"""
if isinstance(__magic_name__ , torch.Tensor ):
return image
elif isinstance(__magic_name__ , PIL.Image.Image ):
_lowerCAmelCase :Tuple = [image]
if isinstance(image[0] , PIL.Image.Image ):
_lowerCAmelCase :List[Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
_lowerCAmelCase :Optional[Any] = np.concatenate(__magic_name__ , axis=0 )
_lowerCAmelCase :Any = np.array(__magic_name__ ).astype(np.floataa ) / 255.0
_lowerCAmelCase :Optional[int] = image.transpose(0 , 3 , 1 , 2 )
_lowerCAmelCase :int = 2.0 * image - 1.0
_lowerCAmelCase :Optional[int] = torch.from_numpy(__magic_name__ )
elif isinstance(image[0] , torch.Tensor ):
_lowerCAmelCase :str = torch.cat(__magic_name__ , dim=0 )
return image
def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : int=0.9995 ):
"""simple docstring"""
if not isinstance(__magic_name__ , np.ndarray ):
_lowerCAmelCase :Tuple = True
_lowerCAmelCase :str = va.device
_lowerCAmelCase :List[str] = va.cpu().numpy()
_lowerCAmelCase :List[str] = va.cpu().numpy()
_lowerCAmelCase :Any = np.sum(va * va / (np.linalg.norm(__magic_name__ ) * np.linalg.norm(__magic_name__ )) )
if np.abs(__magic_name__ ) > DOT_THRESHOLD:
_lowerCAmelCase :Optional[Any] = (1 - t) * va + t * va
else:
_lowerCAmelCase :int = np.arccos(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = np.sin(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = theta_a * t
_lowerCAmelCase :str = np.sin(__magic_name__ )
_lowerCAmelCase :Any = np.sin(theta_a - theta_t ) / sin_theta_a
_lowerCAmelCase :Optional[Any] = sin_theta_t / sin_theta_a
_lowerCAmelCase :List[Any] = sa * va + sa * va
if inputs_are_torch:
_lowerCAmelCase :int = torch.from_numpy(__magic_name__ ).to(__magic_name__ )
return va
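# A cleaner reference version of the spherical interpolation implemented above, in
# plain numpy, to make the math legible: move along the great circle between va and
# vb, falling back to plain lerp when the vectors are nearly parallel.
def slerp_reference(t, va, vb, dot_threshold=0.9995):
    dot = np.sum(va * vb) / (np.linalg.norm(va) * np.linalg.norm(vb))
    if np.abs(dot) > dot_threshold:  # nearly parallel: lerp is numerically safer
        return (1 - t) * va + t * vb
    theta = np.arccos(dot)           # angle between the two vectors
    sin_theta = np.sin(theta)
    sa = np.sin((1 - t) * theta) / sin_theta
    sb = np.sin(t * theta) / sin_theta
    return sa * va + sb * vb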
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase :Any = F.normalize(__magic_name__ , dim=-1 )
_lowerCAmelCase :str = F.normalize(__magic_name__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
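# A quick numerical check on the spherical distance above: for unit vectors,
# 2 * arcsin(||x - y|| / 2) equals the great-circle angle arccos(<x, y>), so up to
# the final square and constant factor the loss measures the angle between the
# two embeddings.
_x = F.normalize(torch.randn(4, 8), dim=-1)
_y = F.normalize(torch.randn(4, 8), dim=-1)
_chord_angle = 2 * torch.asin((_x - _y).norm(dim=-1) / 2)
_dot_angle = torch.acos((_x * _y).sum(dim=-1))
assert torch.allclose(_chord_angle, _dot_angle, atol=1e-4)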
def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
for param in model.parameters():
_lowerCAmelCase :List[str] = value
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def __init__( self: Any , _UpperCAmelCase: AutoencoderKL , _UpperCAmelCase: CLIPTextModel , _UpperCAmelCase: CLIPModel , _UpperCAmelCase: CLIPTokenizer , _UpperCAmelCase: UNetaDConditionModel , _UpperCAmelCase: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , _UpperCAmelCase: CLIPFeatureExtractor , _UpperCAmelCase: str=None , _UpperCAmelCase: Tuple=None , _UpperCAmelCase: Union[str, Any]=None , ):
super().__init__()
self.register_modules(
vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , clip_model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , coca_model=_UpperCAmelCase , coca_tokenizer=_UpperCAmelCase , coca_transform=_UpperCAmelCase , )
_lowerCAmelCase :int = (
feature_extractor.size
if isinstance(feature_extractor.size , _UpperCAmelCase )
else feature_extractor.size['shortest_edge']
)
_lowerCAmelCase :Union[str, Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _UpperCAmelCase )
set_requires_grad(self.clip_model , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase :Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
self.enable_attention_slicing(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
set_requires_grad(self.vae , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
set_requires_grad(self.vae , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
set_requires_grad(self.unet , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
set_requires_grad(self.unet , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Dict ):
# get the original timestep using init_timestep
_lowerCAmelCase :Optional[Any] = min(int(num_inference_steps * strength ) , _UpperCAmelCase )
_lowerCAmelCase :List[str] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase :Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Union[str, Any]=None ):
if not isinstance(_UpperCAmelCase , torch.Tensor ):
raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(_UpperCAmelCase )}""" )
_lowerCAmelCase :Union[str, Any] = image.to(device=_UpperCAmelCase , dtype=_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :List[Any] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCAmelCase )
]
_lowerCAmelCase :List[str] = torch.cat(_UpperCAmelCase , dim=0 )
else:
_lowerCAmelCase :List[str] = self.vae.encode(_UpperCAmelCase ).latent_dist.sample(_UpperCAmelCase )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase :List[Any] = 0.1_8_2_1_5 * init_latents
_lowerCAmelCase :List[Any] = init_latents.repeat_interleave(_UpperCAmelCase , dim=0 )
_lowerCAmelCase :Dict = randn_tensor(init_latents.shape , generator=_UpperCAmelCase , device=_UpperCAmelCase , dtype=_UpperCAmelCase )
# get latents
_lowerCAmelCase :Dict = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :List[str] = init_latents
return latents
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Union[str, Any] ):
_lowerCAmelCase :Optional[int] = self.coca_transform(_UpperCAmelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_lowerCAmelCase :Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_lowerCAmelCase :int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: List[str] ):
_lowerCAmelCase :Optional[int] = self.feature_extractor.preprocess(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
_lowerCAmelCase :List[str] = self.clip_model.get_image_features(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase )
_lowerCAmelCase :Dict = image_embeddings_clip.repeat_interleave(_UpperCAmelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , _UpperCAmelCase: Dict , _UpperCAmelCase: str , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , ):
_lowerCAmelCase :Dict = latents.detach().requires_grad_()
_lowerCAmelCase :Optional[Any] = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
_lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_lowerCAmelCase :int = self.scheduler.alphas_cumprod[timestep]
_lowerCAmelCase :Optional[int] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase :str = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_lowerCAmelCase :Optional[Any] = torch.sqrt(_UpperCAmelCase )
_lowerCAmelCase :List[str] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _UpperCAmelCase ):
_lowerCAmelCase :Dict = self.scheduler.sigmas[index]
_lowerCAmelCase :Optional[Any] = latents - sigma * noise_pred
else:
raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase :Tuple = 1 / 0.1_8_2_1_5 * sample
_lowerCAmelCase :Optional[Any] = self.vae.decode(_UpperCAmelCase ).sample
_lowerCAmelCase :List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase :Tuple = transforms.Resize(self.feature_extractor_size )(_UpperCAmelCase )
_lowerCAmelCase :Tuple = self.normalize(_UpperCAmelCase ).to(latents.dtype )
_lowerCAmelCase :List[Any] = self.clip_model.get_image_features(_UpperCAmelCase )
_lowerCAmelCase :List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase )
_lowerCAmelCase :Tuple = spherical_dist_loss(_UpperCAmelCase , _UpperCAmelCase ).mean() * clip_guidance_scale
_lowerCAmelCase :str = -torch.autograd.grad(_UpperCAmelCase , _UpperCAmelCase )[0]
if isinstance(self.scheduler , _UpperCAmelCase ):
_lowerCAmelCase :Union[str, Any] = latents.detach() + grads * (sigma**2)
_lowerCAmelCase :Dict = noise_pred_original
else:
_lowerCAmelCase :Optional[int] = noise_pred_original - torch.sqrt(_UpperCAmelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self: Optional[int] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: float = 0.6 , _UpperCAmelCase: Optional[int] = 50 , _UpperCAmelCase: Optional[float] = 7.5 , _UpperCAmelCase: Optional[int] = 1 , _UpperCAmelCase: float = 0.0 , _UpperCAmelCase: Optional[float] = 100 , _UpperCAmelCase: Optional[torch.Generator] = None , _UpperCAmelCase: Optional[str] = "pil" , _UpperCAmelCase: bool = True , _UpperCAmelCase: float = 0.8 , _UpperCAmelCase: float = 0.1 , _UpperCAmelCase: float = 0.1 , ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(_UpperCAmelCase )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(_UpperCAmelCase , torch.Generator ) and batch_size > 1:
_lowerCAmelCase :int = [generator] + [None] * (batch_size - 1)
_lowerCAmelCase :List[Any] = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
_lowerCAmelCase :Optional[int] = [x[0] for x in coca_is_none if x[1]]
_lowerCAmelCase :List[str] = ', '.join(_UpperCAmelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :List[Any] = self.get_image_description(_UpperCAmelCase )
if style_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :Any = self.get_image_description(_UpperCAmelCase )
# get prompt text embeddings for content and style
_lowerCAmelCase :Any = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :str = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :int = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :Union[str, Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :List[str] = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# duplicate text embeddings for each generation per prompt
_lowerCAmelCase :str = text_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 )
# set timesteps
_lowerCAmelCase :Any = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_lowerCAmelCase :Dict = {}
if accepts_offset:
_lowerCAmelCase :Optional[int] = 1
self.scheduler.set_timesteps(_UpperCAmelCase , **_UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_lowerCAmelCase , _lowerCAmelCase :List[str] = self.get_timesteps(_UpperCAmelCase , _UpperCAmelCase , self.device )
_lowerCAmelCase :int = timesteps[:1].repeat(_UpperCAmelCase )
# Preprocess image
_lowerCAmelCase :Dict = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :int = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :Any = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :str = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if clip_guidance_scale > 0:
_lowerCAmelCase :Optional[Any] = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Dict = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Any = slerp(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase :int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase :Optional[int] = content_text_input.input_ids.shape[-1]
_lowerCAmelCase :Union[str, Any] = self.tokenizer([''] , padding='max_length' , max_length=_UpperCAmelCase , return_tensors='pt' )
_lowerCAmelCase :Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_lowerCAmelCase :Optional[int] = uncond_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase :int = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase :Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase :Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowerCAmelCase :Any = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device='cpu' , dtype=_UpperCAmelCase ).to(
self.device )
else:
_lowerCAmelCase :List[Any] = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
_lowerCAmelCase :int = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase :Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase :Any = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase :Any = {}
if accepts_eta:
_lowerCAmelCase :Any = eta
# check if the scheduler accepts generator
_lowerCAmelCase :List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_lowerCAmelCase :List[Any] = generator
with self.progress_bar(total=_UpperCAmelCase ):
for i, t in enumerate(_UpperCAmelCase ):
# expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
# predict the noise residual
                noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_lowerCAmelCase :List[Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_lowerCAmelCase , _lowerCAmelCase :List[str] = self.cond_fn(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
# compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
        # Hardcoded 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, None)
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=None )
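# Guidance recap (assumed add-on, not part of the pipeline file): classifier-free
# guidance combines the two U-Net passes above as
#   noise = noise_uncond + guidance_scale * (noise_text - noise_uncond)
# so guidance_scale == 1 reproduces the plain conditional prediction, as the
# Imagen-paper comment notes; clip_guidance_scale > 0 additionally nudges the
# latents toward the slerp-blended CLIP image embedding via cond_fn.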
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
a = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Tuple = BartphoTokenizer
lowerCamelCase : Tuple = False
lowerCamelCase : Union[str, Any] = True
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
super().setUp()
_lowerCAmelCase :int = ['▁This', '▁is', '▁a', '▁t', 'est']
_lowerCAmelCase :str = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
_lowerCAmelCase :List[str] = {'unk_token': '<unk>'}
_lowerCAmelCase :List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['monolingual_vocab_file'] )
with open(self.monolingual_vocab_file , 'w' , encoding='utf-8' ) as fp:
for token in vocab_tokens:
fp.write(f"""{token} {vocab_tokens[token]}\n""" )
_lowerCAmelCase :Optional[int] = BartphoTokenizer(_UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] , **_UpperCAmelCase: Dict ):
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :int = 'This is a là test'
_lowerCAmelCase :Optional[int] = 'This is a<unk><unk> test'
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :Any = BartphoTokenizer(_UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
_lowerCAmelCase :Dict = 'This is a là test'
_lowerCAmelCase :Dict = '▁This ▁is ▁a ▁l à ▁t est'.split()
_lowerCAmelCase :int = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Tuple = tokens + [tokenizer.unk_token]
_lowerCAmelCase :int = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """
    Merge two implicants that differ in at most one position, replacing the
    differing position with '_'.  Returns False if they differ in more than one.
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly combine implicants until only prime implicants remain."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Convert each minterm to a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Check whether a prime implicant covers a minterm (they differ in exactly `count` places)."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select the essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: rows are prime implicants, columns are minterms."""
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
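# Worked call (assumed add-on, not in the original file): with float minterms,
# str(minterm % 2) yields '1.0' / '0.0', so each "bit" is three characters wide.
assert decimal_to_binary(3, [1.0]) == ["0.00.01.0"]  # i.e. binary 001 for minterm 1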
def solution(min_total: int = 10**12) -> int:
    """
    Walk the Pell-style recurrence until the total disc count exceeds
    ``min_total`` and return the corresponding number of blue discs.
    """
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f"{solution() = }")
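# Cross-check (assumed add-on): this appears to be Project Euler problem 100,
# where b blue discs out of n total satisfy P(two blue) = b(b-1) / (n(n-1)) == 1/2.
# For tiny inputs the first arrangement with more than `min_total` discs can be
# brute-forced and compared against the Pell-recurrence answer above.
def _brute_force_blue_discs(min_total: int) -> int:
    total = min_total + 1  # "over" min_total, matching the loop condition above
    while True:
        for blue in range(1, total):
            if 2 * blue * (blue - 1) == total * (total - 1):
                return blue
        total += 1


assert solution(2) == _brute_force_blue_discs(2) == 3    # 4 discs: 3 blue, 1 red
assert solution(4) == _brute_force_blue_discs(4) == 15   # 21 discs: 15 blue, 6 red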
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
a = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
a = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
a = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ (datasets.Metric ):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
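if __name__ == "__main__":
    # Smoke test (assumed add-on, not part of the metric script): a prediction
    # identical to its single reference should score BLEU 1.0.  compute_bleu
    # takes token lists; reference_corpus holds a list of references per sample.
    preds = [["hello", "there", "general", "kenobi"]]
    refs = [[["hello", "there", "general", "kenobi"]]]
    bleu_score = compute_bleu(reference_corpus=refs, translation_corpus=preds, max_order=4, smooth=False)[0]
    assert bleu_score == 1.0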
def equation(x: float) -> float:
    """Function whose root we are looking for: f(x) = 10 - x^2."""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """
    Find where `equation` crosses zero on [a, b] by repeated halving.
    Raises ValueError if f(a) and f(b) have the same sign (no bracketed root).
    """
    # Bolzano's theorem: a sign change on [a, b] guarantees a root inside.
    if equation(a) * equation(b) >= 0:
        raise ValueError('Wrong space!')

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(bisection(-2, 5))
    print(bisection(0, 6))
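# Quick check (assumed add-on, not in the original file): the positive root of
# 10 - x*x on [0, 6] is sqrt(10) ~= 3.162; the loop stops once the bracket is
# narrower than 0.01, so the returned midpoint is within 0.01 of the root.
import math

assert abs(bisection(0, 6) - math.sqrt(10)) < 0.01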
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]


if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
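# Usage note (assumed add-on): with the _LazyModule registered above,
#   from transformers.models.falcon import FalconConfig
# resolves through the import structure without pulling in the torch-heavy
# modeling_falcon module; FalconModel & co. are imported only on first access.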
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Coefficients run low-to-high: coefficients[i] multiplies x**i."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                'The number of coefficients should be equal to the degree + 1.' )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ''
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
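if __name__ == "__main__":
    # Demonstration (assumed add-on, not part of the original file):
    # (1 + x) * (-1 + x) == x^2 - 1, whose derivative is 2x.
    p = Polynomial(1, [1, 1])    # coefficients run low-to-high: 1 + 1x
    q = Polynomial(1, [-1, 1])   # -1 + 1x
    product = p * q
    assert str(product) == "1x^2 - 1"
    assert str(product.derivative()) == "2x"
    assert product.evaluate(3) == 8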
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self: str , _UpperCAmelCase: str , _UpperCAmelCase: Optional[int]=7 , _UpperCAmelCase: Union[str, Any]=3 , _UpperCAmelCase: int=18 , _UpperCAmelCase: List[Any]=30 , _UpperCAmelCase: List[Any]=400 , _UpperCAmelCase: Optional[Any]=True , _UpperCAmelCase: Any=None , _UpperCAmelCase: Any=True , _UpperCAmelCase: int=None , _UpperCAmelCase: Union[str, Any]=True , ):
_lowerCAmelCase :Tuple = size if size is not None else {'shortest_edge': 20}
_lowerCAmelCase :str = crop_size if crop_size is not None else {'height': 18, 'width': 18}
_lowerCAmelCase :str = parent
_lowerCAmelCase :List[Any] = batch_size
_lowerCAmelCase :Optional[Any] = num_channels
_lowerCAmelCase :Optional[Any] = image_size
_lowerCAmelCase :int = min_resolution
_lowerCAmelCase :List[str] = max_resolution
_lowerCAmelCase :List[str] = do_resize
_lowerCAmelCase :Optional[int] = size
_lowerCAmelCase :str = do_center_crop
_lowerCAmelCase :int = crop_size
_lowerCAmelCase :Optional[int] = do_flip_channel_order
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Any = MobileViTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = MobileViTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self: str ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'center_crop' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_flip_channel_order' ) )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
_lowerCAmelCase :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self: int ):
# Initialize image_processing
_lowerCAmelCase :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
_lowerCAmelCase :Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :str = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
# Initialize image_processing
_lowerCAmelCase :int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
_lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :List[str] = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
# Initialize image_processing
_lowerCAmelCase :Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
_lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :int = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
        ) , )
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
TEST_TYPES = (
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def generate_random_hand():
    """Return a random (hand, other, expected) triple drawn from SORTED_HANDS."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    """Yield `number_of_hands` random (hand, other, expected) triples."""
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize('hand, expected', TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize('hand, expected', TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize('hand, expected, card_values', TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize('hand, expected', TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize('hand, expected', TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize('hand, other, expected', TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize('hand, other, expected', generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand('2D AC 3H 4H 5S'), PokerHand('2S 3H 4H 5S 6C')]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand('2C 4S AS 3D 5C')
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    """Project Euler problem 54: Player 1 should win exactly 376 hands."""
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, 'poker_hands.txt')
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ (datasets.BuilderConfig ):
"""simple docstring"""
lowerCamelCase : Optional[datasets.Features] = None
class UpperCAmelCase_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCamelCase : Any = PandasConfig
def SCREAMING_SNAKE_CASE__ ( self: int ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: List[str] ):
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_lowerCAmelCase :Dict = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_UpperCAmelCase , (str, list, tuple) ):
_lowerCAmelCase :Any = data_files
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase :List[Any] = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
_lowerCAmelCase :Any = []
for split_name, files in data_files.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :str = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase :Union[str, Any] = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_UpperCAmelCase , gen_kwargs={'files': files} ) )
return splits
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: pa.Table ):
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase :str = table_cast(_UpperCAmelCase , self.config.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: Dict ):
for i, file in enumerate(itertools.chain.from_iterable(_UpperCAmelCase ) ):
with open(_UpperCAmelCase , 'rb' ) as f:
_lowerCAmelCase :Optional[Any] = pa.Table.from_pandas(pd.read_pickle(_UpperCAmelCase ) )
                yield i, self._cast_table(_UpperCAmelCase )
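# Usage sketch (assumed add-on): this packaged builder backs load_dataset for
# pickled pandas DataFrames, roughly
#   from datasets import load_dataset
#   ds = load_dataset("pandas", data_files={"train": "frames/train.pkl"})  # hypothetical path
# where each .pkl file must unpickle to a DataFrame, converted to Arrow above.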
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
a = logging.get_logger(__name__)
a = """The Nymphenburg Palace is a beautiful palace in Munich!"""
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :Union[str, Any] = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 1024,
'hidden_size': 768,
'max_length': 512,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 1024,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
_lowerCAmelCase :List[Any] = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
_lowerCAmelCase :Optional[int] = BERTEncoder(
attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=__magic_name__ , output_all_encodings=__magic_name__ , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , __magic_name__ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
_lowerCAmelCase :str = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
_lowerCAmelCase :Optional[Any] = os.path.join(get_home_dir() , 'models' )
_lowerCAmelCase :Union[str, Any] = _load_vocab(__magic_name__ , __magic_name__ , __magic_name__ , cls=__magic_name__ )
_lowerCAmelCase :Tuple = nlp.model.BERTModel(
__magic_name__ , len(__magic_name__ ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=__magic_name__ , use_token_type_embed=__magic_name__ , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=__magic_name__ , use_decoder=__magic_name__ , )
original_bort.load_parameters(__magic_name__ , cast_dtype=__magic_name__ , ignore_extra=__magic_name__ )
_lowerCAmelCase :List[str] = original_bort._collect_params_with_prefix()
# Build our config 🤗
_lowerCAmelCase :Dict = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(__magic_name__ ),
}
_lowerCAmelCase :Tuple = BertConfig.from_dict(__magic_name__ )
_lowerCAmelCase :Optional[int] = BertForMaskedLM(__magic_name__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array: Tuple ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param: Optional[int] , gluon_param: Union[str, Any] ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
assert (
shape_hf == shape_gluon
), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
_lowerCAmelCase :str = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
_lowerCAmelCase :Dict = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
_lowerCAmelCase :List[str] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
_lowerCAmelCase :Any = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
_lowerCAmelCase :Union[str, Any] = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
_lowerCAmelCase :BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
_lowerCAmelCase :BertSelfAttention = layer.attention.self
_lowerCAmelCase :Any = check_and_map_params(
self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
_lowerCAmelCase :int = check_and_map_params(
self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
_lowerCAmelCase :str = check_and_map_params(
self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
_lowerCAmelCase :Tuple = check_and_map_params(
self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
_lowerCAmelCase :Dict = check_and_map_params(
self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
_lowerCAmelCase :Tuple = check_and_map_params(
self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
_lowerCAmelCase :BertSelfOutput = layer.attention.output
_lowerCAmelCase :int = check_and_map_params(
self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" )
_lowerCAmelCase :int = check_and_map_params(
self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" )
_lowerCAmelCase :Dict = check_and_map_params(
self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" )
_lowerCAmelCase :List[str] = check_and_map_params(
self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
_lowerCAmelCase :BertIntermediate = layer.intermediate
_lowerCAmelCase :List[Any] = check_and_map_params(
intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
_lowerCAmelCase :List[Any] = check_and_map_params(
intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
_lowerCAmelCase :BertOutput = layer.output
_lowerCAmelCase :Tuple = check_and_map_params(
bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
_lowerCAmelCase :Tuple = check_and_map_params(
bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
_lowerCAmelCase :Tuple = check_and_map_params(
bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
_lowerCAmelCase :Optional[int] = check_and_map_params(
bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
_lowerCAmelCase :List[str] = RobertaTokenizer.from_pretrained('roberta-base' )
_lowerCAmelCase :str = tokenizer.encode_plus(__magic_name__ )['input_ids']
# Get gluon output
_lowerCAmelCase :List[str] = mx.nd.array([input_ids] )
_lowerCAmelCase :Any = original_bort(inputs=__magic_name__ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(__magic_name__ )
_lowerCAmelCase :Any = BertModel.from_pretrained(__magic_name__ )
hf_bort_model.eval()
_lowerCAmelCase :Union[str, Any] = tokenizer.encode_plus(__magic_name__ , return_tensors='pt' )
_lowerCAmelCase :str = hf_bort_model(**__magic_name__ )[0]
_lowerCAmelCase :Union[str, Any] = output_gluon[0].asnumpy()
_lowerCAmelCase :Dict = output_hf[0].detach().numpy()
_lowerCAmelCase :Optional[int] = np.max(np.abs(hf_layer - gluon_layer ) ).item()
_lowerCAmelCase :Union[str, Any] = np.allclose(__magic_name__ , __magic_name__ , atol=1e-3 )
if success:
print('✔️ Both model do output the same tensors' )
else:
print('❌ Both model do **NOT** output the same tensors' )
print('Absolute difference is:' , __magic_name__ )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
a = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
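# Example invocation (assumed add-on; the script name and paths are placeholders):
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path ./bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch
# The converted folder can then be reloaded with BertModel.from_pretrained(...),
# as the output-comparison step above already does.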
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
a = """"""
a = """"""
a = """"""
a = 1 # (0 is vertical, 1 is horizontal)
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase :Union[str, Any] = get_dataset(__magic_name__ , __magic_name__ )
print('Processing...' )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = update_image_and_anno(__magic_name__ , __magic_name__ , __magic_name__ )
for index, image in enumerate(__magic_name__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowerCAmelCase :Optional[Any] = random_chars(32 )
_lowerCAmelCase :str = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
_lowerCAmelCase :Tuple = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""/{file_root}.jpg""" , __magic_name__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Success {index+1}/{len(__magic_name__ )} with {file_name}""" )
_lowerCAmelCase :str = []
for anno in new_annos[index]:
_lowerCAmelCase :List[str] = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(__magic_name__ )
with open(f"""/{file_root}.txt""" , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :int = []
_lowerCAmelCase :Union[str, Any] = []
for label_file in glob.glob(os.path.join(__magic_name__ , '*.txt' ) ):
_lowerCAmelCase :Optional[int] = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(__magic_name__ ) as in_file:
_lowerCAmelCase :Union[str, Any] = in_file.readlines()
_lowerCAmelCase :List[Any] = os.path.join(__magic_name__ , f"""{label_name}.jpg""" )
_lowerCAmelCase :Tuple = []
for obj_list in obj_lists:
_lowerCAmelCase :Union[str, Any] = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__magic_name__ )
labels.append(__magic_name__ )
return img_paths, labels
def UpperCamelCase_( __magic_name__ : list , __magic_name__ : list , __magic_name__ : int = 1 ):
"""simple docstring"""
_lowerCAmelCase :str = []
_lowerCAmelCase :Any = []
_lowerCAmelCase :Optional[Any] = []
for idx in range(len(__magic_name__ ) ):
_lowerCAmelCase :Optional[int] = []
_lowerCAmelCase :Optional[Any] = img_list[idx]
path_list.append(__magic_name__ )
_lowerCAmelCase :List[str] = anno_list[idx]
_lowerCAmelCase :Optional[Any] = cva.imread(__magic_name__ )
if flip_type == 1:
_lowerCAmelCase :List[Any] = cva.flip(__magic_name__ , __magic_name__ )
for bbox in img_annos:
_lowerCAmelCase :List[Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
_lowerCAmelCase :List[str] = cva.flip(__magic_name__ , __magic_name__ )
for bbox in img_annos:
_lowerCAmelCase :List[str] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__magic_name__ )
new_imgs_list.append(__magic_name__ )
return new_imgs_list, new_annos_lists, path_list
def UpperCamelCase_( __magic_name__ : int = 32 ):
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
_lowerCAmelCase :str = ascii_lowercase + digits
return "".join(random.choice(__magic_name__ ) for _ in range(__magic_name__ ) )
if __name__ == "__main__":
main()
print("""DONE ✅""") | 687 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def __init__( self: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Dict ):
super().__init__()
self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
@torch.no_grad()
def __call__( self: Optional[Any] , _UpperCAmelCase: int = 1 , _UpperCAmelCase: int = 100 , _UpperCAmelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None , _UpperCAmelCase: Optional[float] = None , _UpperCAmelCase: bool = True , ):
if audio_length_in_s is None:
_lowerCAmelCase :Any = self.unet.config.sample_size / self.unet.config.sample_rate
_lowerCAmelCase :Union[str, Any] = audio_length_in_s * self.unet.config.sample_rate
_lowerCAmelCase :Tuple = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
_lowerCAmelCase :str = int(_UpperCAmelCase )
if sample_size % down_scale_factor != 0:
_lowerCAmelCase :Dict = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
' process.' )
_lowerCAmelCase :List[Any] = int(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = next(iter(self.unet.parameters() ) ).dtype
_lowerCAmelCase :int = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(_UpperCAmelCase )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
_lowerCAmelCase :Tuple = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
# set step values
self.scheduler.set_timesteps(_UpperCAmelCase , device=audio.device )
_lowerCAmelCase :Tuple = self.scheduler.timesteps.to(_UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_lowerCAmelCase :Optional[int] = self.unet(_UpperCAmelCase , _UpperCAmelCase ).sample
# 2. compute previous image: x_t -> t_t-1
_lowerCAmelCase :int = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
_lowerCAmelCase :Optional[int] = audio.clamp(-1 , 1 ).float().cpu().numpy()
_lowerCAmelCase :Any = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
        return AudioPipelineOutput(audios=_UpperCAmelCase )
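# Usage sketch (assumed add-on): this mirrors DanceDiffusion-style unconditional
# audio generation in diffusers; the class and checkpoint names below are
# illustrative only, not confirmed by this file.
#   pipe = SomeAudioDiffusionPipeline.from_pretrained("some/audio-checkpoint")
#   audio = pipe(batch_size=1, num_inference_steps=100).audios[0]  # numpy array in [-1, 1]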
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
a = logging.get_logger(__name__)
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = nn.functional.normalize(__magic_name__ )
_lowerCAmelCase :List[str] = nn.functional.normalize(__magic_name__ )
return torch.mm(__magic_name__ , normalized_text_embeds.t() )
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : str = CLIPConfig
lowerCamelCase : Any = ['CLIPEncoderLayer']
def __init__( self: Optional[int] , _UpperCAmelCase: CLIPConfig ):
super().__init__(_UpperCAmelCase )
_lowerCAmelCase :Any = CLIPVisionModel(config.vision_config )
_lowerCAmelCase :Optional[int] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_UpperCAmelCase )
_lowerCAmelCase :int = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :Any = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :str = nn.Parameter(torch.ones(17 ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = nn.Parameter(torch.ones(3 ) , requires_grad=_UpperCAmelCase )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Dict ):
_lowerCAmelCase :str = self.vision_model(_UpperCAmelCase )[1] # pooled_output
_lowerCAmelCase :Union[str, Any] = self.visual_projection(_UpperCAmelCase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowerCAmelCase :Optional[int] = cosine_distance(_UpperCAmelCase , self.special_care_embeds ).cpu().float().numpy()
_lowerCAmelCase :List[str] = cosine_distance(_UpperCAmelCase , self.concept_embeds ).cpu().float().numpy()
_lowerCAmelCase :str = []
_lowerCAmelCase :List[Any] = image_embeds.shape[0]
for i in range(_UpperCAmelCase ):
_lowerCAmelCase :Optional[Any] = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
_lowerCAmelCase :List[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
_lowerCAmelCase :List[Any] = special_cos_dist[i][concept_idx]
_lowerCAmelCase :Dict = self.special_care_embeds_weights[concept_idx].item()
_lowerCAmelCase :List[Any] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]} )
_lowerCAmelCase :Any = 0.0_1
for concept_idx in range(len(cos_dist[0] ) ):
_lowerCAmelCase :Union[str, Any] = cos_dist[i][concept_idx]
_lowerCAmelCase :str = self.concept_embeds_weights[concept_idx].item()
_lowerCAmelCase :str = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(_UpperCAmelCase )
result.append(_UpperCAmelCase )
_lowerCAmelCase :Any = [len(res['bad_concepts'] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self: str , _UpperCAmelCase: torch.FloatTensor , _UpperCAmelCase: torch.FloatTensor ):
_lowerCAmelCase :Optional[int] = self.vision_model(_UpperCAmelCase )[1] # pooled_output
_lowerCAmelCase :Union[str, Any] = self.visual_projection(_UpperCAmelCase )
_lowerCAmelCase :Dict = cosine_distance(_UpperCAmelCase , self.special_care_embeds )
_lowerCAmelCase :List[str] = cosine_distance(_UpperCAmelCase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
_lowerCAmelCase :Any = 0.0
_lowerCAmelCase :Union[str, Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
_lowerCAmelCase :Tuple = torch.any(special_scores > 0 , dim=1 )
_lowerCAmelCase :List[str] = special_care * 0.0_1
_lowerCAmelCase :Any = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
_lowerCAmelCase :Optional[Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
_lowerCAmelCase :List[str] = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
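# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the dataset row above): the scoring idea the
# safety checker implements — cosine similarity between normalized image and
# concept embeddings, flagged when it exceeds a per-concept threshold. All
# tensors here are random stand-ins, not real learned embeddings.
import torch
import torch.nn as nn

_image_embeds = nn.functional.normalize(torch.randn(4, 768))     # 4 images
_concept_embeds = nn.functional.normalize(torch.randn(17, 768))  # 17 concepts
_thresholds = torch.full((17,), 0.5)  # stand-in for the learned per-concept weights
_scores = _image_embeds @ _concept_embeds.t() - _thresholds
_has_flagged_concept = torch.any(_scores > 0, dim=1)  # one bool per image
# ---------------------------------------------------------------------------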
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ (datasets.BuilderConfig ):
"""simple docstring"""
lowerCamelCase : Optional[datasets.Features] = None
class UpperCAmelCase_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCamelCase : Any = PandasConfig
def SCREAMING_SNAKE_CASE__ ( self: int ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: List[str] ):
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_lowerCAmelCase :Dict = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_UpperCAmelCase , (str, list, tuple) ):
_lowerCAmelCase :Any = data_files
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase :List[Any] = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
_lowerCAmelCase :Any = []
for split_name, files in data_files.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :str = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase :Union[str, Any] = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_UpperCAmelCase , gen_kwargs={'files': files} ) )
return splits
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: pa.Table ):
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase :str = table_cast(_UpperCAmelCase , self.config.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: Dict ):
for i, file in enumerate(itertools.chain.from_iterable(_UpperCAmelCase ) ):
with open(_UpperCAmelCase , 'rb' ) as f:
_lowerCAmelCase :Optional[Any] = pa.Table.from_pandas(pd.read_pickle(_UpperCAmelCase ) )
yield i, self._cast_table(_UpperCAmelCase )
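# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the dataset row above): the core conversion
# the builder performs, outside the `datasets` machinery — a pickled pandas
# DataFrame read back and turned into an Arrow table. The path is hypothetical.
import pandas as pd
import pyarrow as pa

_df = pd.DataFrame({"text": ["a", "b"], "label": [0, 1]})
_df.to_pickle("/tmp/example.pkl")
_table = pa.Table.from_pandas(pd.read_pickle("/tmp/example.pkl"))
assert _table.num_rows == 2
# ---------------------------------------------------------------------------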
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
a = 6_3_7_8_1_3_7.0
a = 6_3_5_6_7_5_2.3_1_4_2_4_5
a = 6_378_137
def UpperCamelCase_( __magic_name__ : float , __magic_name__ : float , __magic_name__ : float , __magic_name__ : float ):
"""simple docstring"""
_lowerCAmelCase :List[Any] = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
_lowerCAmelCase :Union[str, Any] = atan((1 - flattening) * tan(radians(__magic_name__ ) ) )
_lowerCAmelCase :List[str] = atan((1 - flattening) * tan(radians(__magic_name__ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
_lowerCAmelCase :int = haversine_distance(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
_lowerCAmelCase :str = (b_lata + b_lata) / 2
_lowerCAmelCase :Tuple = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma/2)
_lowerCAmelCase :str = (sin(__magic_name__ ) ** 2) * (cos(__magic_name__ ) ** 2)
_lowerCAmelCase :Optional[int] = cos(sigma / 2 ) ** 2
_lowerCAmelCase :List[Any] = (sigma - sin(__magic_name__ )) * (x_numerator / x_denominator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma/2)
_lowerCAmelCase :Dict = (cos(__magic_name__ ) ** 2) * (sin(__magic_name__ ) ** 2)
_lowerCAmelCase :str = sin(sigma / 2 ) ** 2
_lowerCAmelCase :Union[str, Any] = (sigma + sin(__magic_name__ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
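# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the dataset row above): the haversine
# dependency used above for the central angle, written out standalone under a
# spherical-Earth assumption, for reference.
from math import asin, cos, radians, sin, sqrt

def _haversine(lat1, lon1, lat2, lon2, radius=6_378_137.0):
    dlat, dlon = radians(lat2 - lat1), radians(lon2 - lon1)
    h = sin(dlat / 2) ** 2 + cos(radians(lat1)) * cos(radians(lat2)) * sin(dlon / 2) ** 2
    return 2 * radius * asin(sqrt(h))  # arc length in metres
# ---------------------------------------------------------------------------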
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : UNet2DModel
lowerCamelCase : KarrasVeScheduler
def __init__( self: Optional[Any] , _UpperCAmelCase: UNet2DModel , _UpperCAmelCase: KarrasVeScheduler ):
super().__init__()
self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
@torch.no_grad()
def __call__( self: int , _UpperCAmelCase: int = 1 , _UpperCAmelCase: int = 50 , _UpperCAmelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None , _UpperCAmelCase: Optional[str] = "pil" , _UpperCAmelCase: bool = True , **_UpperCAmelCase: Tuple , ):
_lowerCAmelCase :Any = self.unet.config.sample_size
_lowerCAmelCase :Dict = (batch_size, 3, img_size, img_size)
_lowerCAmelCase :Any = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
_lowerCAmelCase :List[Any] = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
_lowerCAmelCase :List[str] = self.scheduler.schedule[t]
_lowerCAmelCase :Dict = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
_lowerCAmelCase , _lowerCAmelCase :Tuple = self.scheduler.add_noise_to_input(_UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
_lowerCAmelCase :Dict = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
_lowerCAmelCase :Tuple = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
_lowerCAmelCase :Tuple = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
_lowerCAmelCase :List[str] = self.scheduler.step_correct(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , step_output.prev_sample , step_output['derivative'] , )
_lowerCAmelCase :Optional[Any] = step_output.prev_sample
_lowerCAmelCase :int = (sample / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase :Tuple = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCAmelCase :Optional[int] = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCAmelCase )
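# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the dataset row above): how a
# KarrasVe-style pipeline is typically assembled and called. The UNet shape is
# a stand-in, not a released checkpoint, and `KarrasVePipeline` (the upstream
# name of the class above) is only importable on diffusers versions that still
# ship it.
from diffusers import UNet2DModel, KarrasVeScheduler, KarrasVePipeline

_unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
_pipe = KarrasVePipeline(unet=_unet, scheduler=KarrasVeScheduler())
_image = _pipe(batch_size=1, num_inference_steps=50).images[0]
# ---------------------------------------------------------------------------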
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Dict = 'encoder-decoder'
lowerCamelCase : Optional[Any] = True
def __init__( self: str , **_UpperCAmelCase: int ):
super().__init__(**_UpperCAmelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
_lowerCAmelCase :Optional[Any] = kwargs.pop('encoder' )
_lowerCAmelCase :Dict = encoder_config.pop('model_type' )
_lowerCAmelCase :str = kwargs.pop('decoder' )
_lowerCAmelCase :str = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
_lowerCAmelCase :str = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Tuple = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Any = True
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls: Tuple , _UpperCAmelCase: PretrainedConfig , _UpperCAmelCase: PretrainedConfig , **_UpperCAmelCase: str ):
logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
_lowerCAmelCase :Dict = True
_lowerCAmelCase :List[str] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowerCAmelCase :Optional[int] = self.encoder.to_dict()
_lowerCAmelCase :Union[str, Any] = self.decoder.to_dict()
_lowerCAmelCase :List[str] = self.__class__.model_type
return output
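# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the dataset row above): building the
# composite config from two sub-configs via the classmethod above (upstream
# this class is `EncoderDecoderConfig`).
from transformers import BertConfig, EncoderDecoderConfig

_encoder_config = BertConfig()
_decoder_config = BertConfig()
_config = EncoderDecoderConfig.from_encoder_decoder_configs(_encoder_config, _decoder_config)
assert _config.decoder.is_decoder and _config.decoder.add_cross_attention
# ---------------------------------------------------------------------------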
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
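# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the dataset row above): the lazy-import idea
# behind `_import_structure` / `_LazyModule`, reduced to its core — map
# attribute names to modules and import only on first attribute access.
import importlib

class _MiniLazyModule:
    def __init__(self, attr_to_module):
        self._attr_to_module = attr_to_module  # e.g. {"OrderedDict": "collections"}
    def __getattr__(self, name):
        module = importlib.import_module(self._attr_to_module[name])
        return getattr(module, name)

_lazy = _MiniLazyModule({"OrderedDict": "collections"})
_ = _lazy.OrderedDict  # `collections` is only resolved at this point
# ---------------------------------------------------------------------------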
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: int , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=13 , _UpperCAmelCase: Optional[Any]=32 , _UpperCAmelCase: List[Any]=2 , _UpperCAmelCase: Optional[int]=3 , _UpperCAmelCase: Optional[int]=16 , _UpperCAmelCase: Optional[Any]=[32, 64, 128] , _UpperCAmelCase: Optional[int]=[1, 2, 1] , _UpperCAmelCase: int=[2, 2, 4] , _UpperCAmelCase: List[str]=2 , _UpperCAmelCase: Dict=2.0 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: str=0.0 , _UpperCAmelCase: int=0.0 , _UpperCAmelCase: str=0.1 , _UpperCAmelCase: Dict="gelu" , _UpperCAmelCase: Optional[Any]=False , _UpperCAmelCase: Union[str, Any]=True , _UpperCAmelCase: Union[str, Any]=0.0_2 , _UpperCAmelCase: Optional[int]=1e-5 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: Optional[Any]=None , _UpperCAmelCase: Tuple=True , _UpperCAmelCase: str=10 , _UpperCAmelCase: int=8 , _UpperCAmelCase: List[Any]=["stage1", "stage2"] , _UpperCAmelCase: List[Any]=[1, 2] , ):
_lowerCAmelCase :Optional[int] = parent
_lowerCAmelCase :Dict = batch_size
_lowerCAmelCase :Optional[Any] = image_size
_lowerCAmelCase :Optional[Any] = patch_size
_lowerCAmelCase :List[Any] = num_channels
_lowerCAmelCase :Optional[int] = embed_dim
_lowerCAmelCase :List[str] = hidden_sizes
_lowerCAmelCase :Union[str, Any] = depths
_lowerCAmelCase :int = num_heads
_lowerCAmelCase :Any = window_size
_lowerCAmelCase :List[Any] = mlp_ratio
_lowerCAmelCase :Optional[int] = qkv_bias
_lowerCAmelCase :Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase :Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase :Dict = drop_path_rate
_lowerCAmelCase :List[Any] = hidden_act
_lowerCAmelCase :Tuple = use_absolute_embeddings
_lowerCAmelCase :Optional[int] = patch_norm
_lowerCAmelCase :Optional[Any] = layer_norm_eps
_lowerCAmelCase :Union[str, Any] = initializer_range
_lowerCAmelCase :List[str] = is_training
_lowerCAmelCase :str = scope
_lowerCAmelCase :Optional[int] = use_labels
_lowerCAmelCase :List[Any] = type_sequence_label_size
_lowerCAmelCase :Union[str, Any] = encoder_stride
_lowerCAmelCase :Optional[int] = out_features
_lowerCAmelCase :List[str] = out_indices
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase :Dict = None
if self.use_labels:
_lowerCAmelCase :List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase :str = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self: int ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple ):
_lowerCAmelCase :List[Any] = FocalNetModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCAmelCase :List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Union[str, Any] = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCAmelCase :Optional[int] = None
_lowerCAmelCase :Dict = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Any = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Any = FocalNetForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCAmelCase :List[Any] = 1
_lowerCAmelCase :List[Any] = FocalNetForMaskedImageModeling(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :int = model(_UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int , _UpperCAmelCase: Dict , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = self.type_sequence_label_size
_lowerCAmelCase :Dict = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase :Optional[int] = 1
_lowerCAmelCase :Tuple = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = config_and_inputs
_lowerCAmelCase :List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ (snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase : Optional[Any] = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Tuple = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Any = False
lowerCamelCase : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = FocalNetModelTester(self )
_lowerCAmelCase :str = ConfigTester(self , config_class=_UpperCAmelCase , embed_dim=37 , has_text_modality=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
return
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def SCREAMING_SNAKE_CASE__ ( self: str ):
pass
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[Any] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase :Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase , _lowerCAmelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Tuple = model_class(_UpperCAmelCase )
_lowerCAmelCase :Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase :int = [*signature.parameters.keys()]
_lowerCAmelCase :List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: int , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase :Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
_lowerCAmelCase :List[Any] = outputs.hidden_states
_lowerCAmelCase :str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# FocalNet has a different seq_length
_lowerCAmelCase :Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_lowerCAmelCase :List[str] = outputs.reshaped_hidden_states
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :int = reshaped_hidden_states[0].shape
_lowerCAmelCase :Optional[int] = (
reshaped_hidden_states[0].view(_UpperCAmelCase , _UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[int] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Dict = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase , _lowerCAmelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :str = 3
_lowerCAmelCase :Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCAmelCase :int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCAmelCase :Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :List[str] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Union[str, Any] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
@slow
def SCREAMING_SNAKE_CASE__ ( self: int ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase :List[Any] = FocalNetModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :Optional[int] = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
_lowerCAmelCase :str = model_class(config=_UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Tuple = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.default_image_processor
_lowerCAmelCase :Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCAmelCase :Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase :Dict = model(**_UpperCAmelCase )
# verify the logits
_lowerCAmelCase :str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
_lowerCAmelCase :Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : int = (FocalNetBackbone,) if is_torch_available() else ()
lowerCamelCase : str = FocalNetConfig
lowerCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Any = FocalNetModelTester(self )
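# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the dataset row above): the same forward
# pass the integration test exercises, but with a randomly initialized config
# so it runs offline. The image size and label count are stand-ins.
import torch
from transformers import FocalNetConfig, FocalNetForImageClassification

_config = FocalNetConfig(image_size=64, num_labels=10)
_model = FocalNetForImageClassification(_config).eval()
with torch.no_grad():
    _logits = _model(torch.randn(1, 3, 64, 64)).logits
assert _logits.shape == (1, 10)
# ---------------------------------------------------------------------------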
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
def UpperCamelCase_( __magic_name__ : List[Any] , __magic_name__ : List[Any]=False ):
"""simple docstring"""
_lowerCAmelCase :str = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
_lowerCAmelCase :Dict = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def UpperCamelCase_( __magic_name__ : Any , __magic_name__ : int , __magic_name__ : List[str]=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCAmelCase :Union[str, Any] = ''
else:
_lowerCAmelCase :str = 'deit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCAmelCase :str = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
_lowerCAmelCase :Tuple = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase :Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
_lowerCAmelCase :List[Any] = in_proj_bias[: config.hidden_size]
_lowerCAmelCase :int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCAmelCase :Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCAmelCase :List[Any] = in_proj_weight[
-config.hidden_size :, :
]
_lowerCAmelCase :Tuple = in_proj_bias[-config.hidden_size :]
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :List[str] = dct.pop(__magic_name__ )
_lowerCAmelCase :Tuple = val
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :str = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowerCAmelCase :Union[str, Any] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase_( __magic_name__ : List[Any] , __magic_name__ : List[str] ):
"""simple docstring"""
_lowerCAmelCase :Dict = DeiTConfig()
# all deit models have fine-tuned heads
_lowerCAmelCase :Optional[int] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
_lowerCAmelCase :Union[str, Any] = 1000
_lowerCAmelCase :Tuple = 'huggingface/label-files'
_lowerCAmelCase :Dict = 'imagenet-1k-id2label.json'
_lowerCAmelCase :Optional[int] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type='dataset' ) , 'r' ) )
_lowerCAmelCase :Any = {int(__magic_name__ ): v for k, v in idalabel.items()}
_lowerCAmelCase :str = idalabel
_lowerCAmelCase :str = {v: k for k, v in idalabel.items()}
_lowerCAmelCase :Optional[Any] = int(deit_name[-6:-4] )
_lowerCAmelCase :List[str] = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('tiny' ):
_lowerCAmelCase :Dict = 192
_lowerCAmelCase :List[Any] = 768
_lowerCAmelCase :List[Any] = 12
_lowerCAmelCase :List[Any] = 3
elif deit_name[9:].startswith('small' ):
_lowerCAmelCase :str = 384
_lowerCAmelCase :Union[str, Any] = 1536
_lowerCAmelCase :str = 12
_lowerCAmelCase :List[str] = 6
if deit_name[9:].startswith('base' ):
pass
elif deit_name[4:].startswith('large' ):
_lowerCAmelCase :Optional[int] = 1024
_lowerCAmelCase :Optional[int] = 4096
_lowerCAmelCase :List[Any] = 24
_lowerCAmelCase :Any = 16
# load original model from timm
_lowerCAmelCase :Tuple = timm.create_model(__magic_name__ , pretrained=__magic_name__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCAmelCase :Union[str, Any] = timm_model.state_dict()
_lowerCAmelCase :Tuple = create_rename_keys(__magic_name__ , __magic_name__ )
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
read_in_q_k_v(__magic_name__ , __magic_name__ , __magic_name__ )
# load HuggingFace model
_lowerCAmelCase :Optional[int] = DeiTForImageClassificationWithTeacher(__magic_name__ ).eval()
model.load_state_dict(__magic_name__ )
# Check outputs on an image, prepared by DeiTImageProcessor
_lowerCAmelCase :Union[str, Any] = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
_lowerCAmelCase :Union[str, Any] = DeiTImageProcessor(size=__magic_name__ , crop_size=config.image_size )
_lowerCAmelCase :int = image_processor(images=prepare_img() , return_tensors='pt' )
_lowerCAmelCase :List[str] = encoding['pixel_values']
_lowerCAmelCase :List[str] = model(__magic_name__ )
_lowerCAmelCase :List[Any] = timm_model(__magic_name__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__magic_name__ , outputs.logits , atol=1e-3 )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__magic_name__ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
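# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the dataset row above): the heart of
# `read_in_q_k_v` — timm stores attention q/k/v as one fused
# (3*hidden, hidden) matrix, which the conversion splits into three
# (hidden, hidden) blocks. Shown here on a tiny random tensor.
import torch

_hidden = 4
_qkv_weight = torch.randn(3 * _hidden, _hidden)
_q = _qkv_weight[:_hidden, :]
_k = _qkv_weight[_hidden : 2 * _hidden, :]
_v = _qkv_weight[-_hidden:, :]
assert _q.shape == _k.shape == _v.shape == (_hidden, _hidden)
# ---------------------------------------------------------------------------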
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
a = HfApi()
a = {}
# fmt: off
a = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
a = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
a = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
a = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
a = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
a = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
a = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
a = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
a = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
a = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
a = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
a = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
a = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
a = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
a = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
a = api.list_models(filter="""diffusers""")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
a = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1]
print(F'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith("""CompVis"""):
a = UNet2DModel.from_pretrained(local_checkpoint, subfolder="""unet""")
else:
a = UNet2DModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
a = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
a = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
a = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3
)
print(F'''{mod.modelId} has passed successfully!!!''')
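# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the dataset row above): the verification
# pattern the loop above relies on — compare a slice of fresh model output to
# stored reference values within an absolute tolerance.
import torch

torch.manual_seed(0)
_output = torch.randn(30)
_reference = _output + 1e-4 * torch.randn(30)  # stand-in for the hard-coded expected slices
assert torch.allclose(_output, _reference, atol=1e-3)
# ---------------------------------------------------------------------------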
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
a = re.compile(R"""\s+""")
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(__magic_name__ , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def UpperCamelCase_( __magic_name__ : Optional[Any] ):
"""simple docstring"""
_lowerCAmelCase :Union[str, Any] = [len(__magic_name__ ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(__magic_name__ ), "line_max": max(__magic_name__ )}
def UpperCamelCase_( __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :Any = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : List[Any]=5 ):
"""simple docstring"""
_lowerCAmelCase :Tuple = ['auto-generated', 'autogenerated', 'automatically generated']
_lowerCAmelCase :Optional[int] = example['content'].splitlines()
for _, line in zip(range(__magic_name__ ) , __magic_name__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def UpperCamelCase_( __magic_name__ : Dict , __magic_name__ : Optional[Any]=5 , __magic_name__ : Optional[int]=0.05 ):
"""simple docstring"""
_lowerCAmelCase :List[Any] = ['unit tests', 'test file', 'configuration file']
_lowerCAmelCase :Union[str, Any] = example['content'].splitlines()
_lowerCAmelCase :Dict = 0
_lowerCAmelCase :str = 0
# first test
for _, line in zip(range(__magic_name__ ) , __magic_name__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowerCAmelCase :str = example['content'].count('\n' )
_lowerCAmelCase :List[str] = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def UpperCamelCase_( __magic_name__ : Any ):
"""simple docstring"""
_lowerCAmelCase :Dict = ['def ', 'class ', 'for ', 'while ']
_lowerCAmelCase :Tuple = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any]=4 ):
"""simple docstring"""
_lowerCAmelCase :Optional[int] = example['content'].splitlines()
_lowerCAmelCase :List[Any] = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :int = tokenizer(example['content'] , truncation=__magic_name__ )['input_ids']
_lowerCAmelCase :List[Any] = len(example['content'] ) / len(__magic_name__ )
return {"ratio": ratio}
def UpperCamelCase_( __magic_name__ : Optional[Any] ):
"""simple docstring"""
_lowerCAmelCase :str = {}
results.update(get_hash(__magic_name__ ) )
results.update(line_stats(__magic_name__ ) )
results.update(alpha_stats(__magic_name__ ) )
results.update(char_token_ratio(__magic_name__ ) )
results.update(is_autogenerated(__magic_name__ ) )
results.update(is_config_or_test(__magic_name__ ) )
results.update(has_no_keywords(__magic_name__ ) )
results.update(has_few_assignments(__magic_name__ ) )
return results
def UpperCamelCase_( __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Optional[int] ):
"""simple docstring"""
if not check_uniques(__magic_name__ , __magic_name__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def UpperCamelCase_( __magic_name__ : Union[str, Any] ):
"""simple docstring"""
with open(__magic_name__ , 'rb' ) as f_in:
with gzip.open(str(__magic_name__ ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
os.unlink(__magic_name__ )
# Settings
a = HfArgumentParser(PreprocessingArguments)
a = parser.parse_args()
if args.num_workers is None:
a = multiprocessing.cpu_count()
a = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
a = time.time()
a = load_dataset(args.dataset_name, split="""train""")
print(F'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
a = time.time()
a = ds.map(preprocess, num_proc=args.num_workers)
print(F'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
a = set(ds.unique("""hash"""))
a = len(uniques) / len(ds)
print(F'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
a = time.time()
a = ds.filter(filter, fn_kwargs={"""uniques""": uniques, """args""": args})
print(F'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(F'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
a = time.time()
a , a = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(F'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
a = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure this is the right place to save it
if args.near_deduplication:
with open(output_dir / """duplicate_clusters.json""", """w""") as f:
json.dump(duplicate_clusters, f)
a = output_dir / """data"""
data_dir.mkdir(exist_ok=True)
a = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
a = str(data_dir / F'''file-{file_number+1:012}.json''')
a = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'''Time to save dataset: {time.time()-t_start:.2f}''')
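# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the dataset row above): the exact-dedup step
# in isolation — hash whitespace-stripped content and keep the first occurrence
# per hash, which is what `get_hash` + `check_uniques` implement together.
import hashlib
import re

_WS = re.compile(r"\s+")

def _content_hash(text):
    return hashlib.md5(_WS.sub("", text).encode("utf-8")).hexdigest()

_records = ["def f():\n    pass", "def f():    pass", "print(1)"]
_seen, _unique = set(), []
for _text in _records:
    _h = _content_hash(_text)
    if _h not in _seen:
        _seen.add(_h)
        _unique.append(_text)
assert len(_unique) == 2  # the two `def f()` variants collapse to one
# ---------------------------------------------------------------------------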
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Optional[int] = 10
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :str = [1, 2, 3, 4]
_lowerCAmelCase :Union[str, Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
_lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
_lowerCAmelCase :Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :List[str] = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
_lowerCAmelCase , _lowerCAmelCase :Optional[Any] = process_story(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [] )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Optional[int] = ''
_lowerCAmelCase , _lowerCAmelCase :str = process_story(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [] )
self.assertEqual(_UpperCAmelCase , [] )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Optional[Any] = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
_lowerCAmelCase , _lowerCAmelCase :Optional[int] = process_story(_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Optional[int] = ['It was the best of times.']
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :Union[str, Any] = torch.tensor([1, 2, 3, 4] )
_lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 0 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :List[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
_lowerCAmelCase :Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 23 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 1 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :List[str] = 101
_lowerCAmelCase :Dict = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
_lowerCAmelCase :int = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_lowerCAmelCase :List[str] = compute_token_type_ids(_UpperCAmelCase , _UpperCAmelCase )
np.testing.assert_array_equal(_UpperCAmelCase , _UpperCAmelCase )
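# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the dataset row above): a reconstruction of
# `truncate_or_pad` that satisfies the assertions in the tests — clip to
# `block_size`, else right-pad with `pad_token_id`. This is inferred from the
# tests, not the imported implementation.
def _truncate_or_pad(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))

assert _truncate_or_pad([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
# ---------------------------------------------------------------------------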
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: Any , _UpperCAmelCase: Collection[float] | None = None ):
if components is None:
_lowerCAmelCase :List[str] = []
_lowerCAmelCase :Tuple = list(_UpperCAmelCase )
def __len__( self: Optional[int] ):
return len(self.__components )
def __str__( self: Any ):
return "(" + ",".join(map(_UpperCAmelCase , self.__components ) ) + ")"
def __add__( self: Optional[Any] , _UpperCAmelCase: Vector ):
_lowerCAmelCase :Union[str, Any] = len(self )
if size == len(_UpperCAmelCase ):
_lowerCAmelCase :Tuple = [self.__components[i] + other.component(_UpperCAmelCase ) for i in range(_UpperCAmelCase )]
return Vector(_UpperCAmelCase )
else:
raise Exception('must have the same size' )
def __sub__( self: Optional[Any] , _UpperCAmelCase: Vector ):
_lowerCAmelCase :List[str] = len(self )
if size == len(_UpperCAmelCase ):
_lowerCAmelCase :str = [self.__components[i] - other.component(_UpperCAmelCase ) for i in range(_UpperCAmelCase )]
return Vector(_UpperCAmelCase )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self: str , _UpperCAmelCase: float ):
...
@overload
def __mul__( self: int , _UpperCAmelCase: Vector ):
...
def __mul__( self: Any , _UpperCAmelCase: float | Vector ):
if isinstance(_UpperCAmelCase , (float, int) ):
_lowerCAmelCase :Optional[int] = [c * other for c in self.__components]
return Vector(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(self ) == len(_UpperCAmelCase ):
_lowerCAmelCase :str = len(self )
_lowerCAmelCase :Tuple = [self.__components[i] * other.component(_UpperCAmelCase ) for i in range(_UpperCAmelCase )]
return sum(_UpperCAmelCase )
else: # error case
raise Exception('invalid operand!' )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
return Vector(self.__components )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] , _UpperCAmelCase: int ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: int , _UpperCAmelCase: float ):
assert -len(self.__components ) <= pos < len(self.__components )
_lowerCAmelCase :int = value
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
_lowerCAmelCase :Dict = [c**2 for c in self.__components]
return math.sqrt(sum(_UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Vector , _UpperCAmelCase: bool = False ):
_lowerCAmelCase :Dict = self * other
_lowerCAmelCase :Optional[int] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
assert isinstance(__magic_name__ , __magic_name__ )
return Vector([0] * dimension )
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : int ):
"""simple docstring"""
assert isinstance(__magic_name__ , __magic_name__ ) and (isinstance(__magic_name__ , __magic_name__ ))
_lowerCAmelCase :Dict = [0] * dimension
_lowerCAmelCase :int = 1
return Vector(__magic_name__ )
def UpperCamelCase_( __magic_name__ : float , __magic_name__ : Vector , __magic_name__ : Vector ):
"""simple docstring"""
assert (
isinstance(__magic_name__ , __magic_name__ )
and isinstance(__magic_name__ , __magic_name__ )
and (isinstance(__magic_name__ , (int, float) ))
)
return x * scalar + y
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : int , __magic_name__ : int ):
"""simple docstring"""
random.seed(__magic_name__ )
_lowerCAmelCase :Any = [random.randint(__magic_name__ , __magic_name__ ) for _ in range(__magic_name__ )]
return Vector(__magic_name__ )
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: Dict , _UpperCAmelCase: list[list[float]] , _UpperCAmelCase: int , _UpperCAmelCase: int ):
_lowerCAmelCase :str = matrix
_lowerCAmelCase :Dict = w
_lowerCAmelCase :Tuple = h
def __str__( self: Tuple ):
_lowerCAmelCase :Optional[int] = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self: int , _UpperCAmelCase: Matrix ):
if self.__width == other.width() and self.__height == other.height():
_lowerCAmelCase :Optional[Any] = []
for i in range(self.__height ):
_lowerCAmelCase :List[str] = [
self.__matrix[i][j] + other.component(_UpperCAmelCase , _UpperCAmelCase )
for j in range(self.__width )
]
matrix.append(_UpperCAmelCase )
return Matrix(_UpperCAmelCase , self.__width , self.__height )
else:
            raise Exception('matrices must have the same dimension!' )
def __sub__( self: Any , _UpperCAmelCase: Matrix ):
if self.__width == other.width() and self.__height == other.height():
_lowerCAmelCase :List[str] = []
for i in range(self.__height ):
_lowerCAmelCase :Tuple = [
self.__matrix[i][j] - other.component(_UpperCAmelCase , _UpperCAmelCase )
for j in range(self.__width )
]
matrix.append(_UpperCAmelCase )
return Matrix(_UpperCAmelCase , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self: List[str] , _UpperCAmelCase: float ):
...
@overload
def __mul__( self: Any , _UpperCAmelCase: Vector ):
...
def __mul__( self: Union[str, Any] , _UpperCAmelCase: float | Vector ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ): # matrix-vector
if len(_UpperCAmelCase ) == self.__width:
_lowerCAmelCase :str = zero_vector(self.__height )
for i in range(self.__height ):
_lowerCAmelCase :int = [
self.__matrix[i][j] * other.component(_UpperCAmelCase )
for j in range(self.__width )
]
ans.change_component(_UpperCAmelCase , sum(_UpperCAmelCase ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(_UpperCAmelCase , (int, float) ): # matrix-scalar
_lowerCAmelCase :Dict = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(_UpperCAmelCase , self.__width , self.__height )
return None
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
return self.__height
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
return self.__width
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: int , _UpperCAmelCase: int ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
            raise Exception('component: indices out of bounds' )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int , _UpperCAmelCase: int , _UpperCAmelCase: float ):
if 0 <= x < self.__height and 0 <= y < self.__width:
_lowerCAmelCase :Optional[Any] = value
else:
raise Exception('change_component: indices out of bounds' )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] , _UpperCAmelCase: int , _UpperCAmelCase: int ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
_lowerCAmelCase :int = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(_UpperCAmelCase ) ):
_lowerCAmelCase :Optional[Any] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(_UpperCAmelCase , self.__width - 1 , self.__height - 1 ).determinant()
def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: int , _UpperCAmelCase: int ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(_UpperCAmelCase , _UpperCAmelCase )
else:
raise Exception('Indices out of bounds' )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
_lowerCAmelCase :Optional[Any] = [
self.__matrix[0][y] * self.cofactor(0 , _UpperCAmelCase ) for y in range(self.__width )
]
return sum(_UpperCAmelCase )
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :list[list[float]] = [[0] * n for _ in range(__magic_name__ )]
return Matrix(__magic_name__ , __magic_name__ , __magic_name__ )
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int ):
"""simple docstring"""
random.seed(__magic_name__ )
_lowerCAmelCase :list[list[float]] = [
[random.randint(__magic_name__ , __magic_name__ ) for _ in range(__magic_name__ )] for _ in range(__magic_name__ )
]
return Matrix(__magic_name__ , __magic_name__ , __magic_name__ ) | 687 |
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
a = int(input("""Enter number: """).strip())
print(F'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''') | 687 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
a = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Optional[Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
_lowerCAmelCase :Tuple = self.diffusers_dir
shutil.copy(
os.path.join(_UpperCAmelCase , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :str = 'src/diffusers'
shutil.rmtree(self.diffusers_dir )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=None ):
_lowerCAmelCase :int = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_lowerCAmelCase :Dict = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_lowerCAmelCase :Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_lowerCAmelCase :List[str] = black.format_str(_UpperCAmelCase , mode=_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = os.path.join(self.diffusers_dir , 'new_code.py' )
with open(_UpperCAmelCase , 'w' , newline='\n' ) as f:
f.write(_UpperCAmelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_UpperCAmelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_UpperCAmelCase )
with open(_UpperCAmelCase , 'r' ) as f:
self.assertTrue(f.read() , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :List[str] = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
# Base copy consistency
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , _UpperCAmelCase , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , _UpperCAmelCase ) , )
# Copy consistency with a really long name
_lowerCAmelCase :Optional[int] = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , _UpperCAmelCase , _UpperCAmelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , _UpperCAmelCase , overwrite_result=re.sub('DDPM' , 'Test' , _UpperCAmelCase ) , ) | 687 |
from __future__ import annotations
from collections.abc import MutableSequence
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: List[Any] , _UpperCAmelCase: int , _UpperCAmelCase: MutableSequence[float] ):
if len(_UpperCAmelCase ) != degree + 1:
raise ValueError(
'The number of coefficients should be equal to the degree + 1.' )
_lowerCAmelCase :list[float] = list(_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = degree
def __add__( self: str , _UpperCAmelCase: Polynomial ):
if self.degree > polynomial_a.degree:
_lowerCAmelCase :Any = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , _UpperCAmelCase )
else:
_lowerCAmelCase :List[Any] = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , _UpperCAmelCase )
def __sub__( self: str , _UpperCAmelCase: Polynomial ):
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self: Union[str, Any] ):
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self: int , _UpperCAmelCase: Polynomial ):
_lowerCAmelCase :list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: int | float ):
_lowerCAmelCase :int | float = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self: Union[str, Any] ):
_lowerCAmelCase :Dict = ''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
                polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(i )
return polynomial
def __repr__( self: Optional[Any] ):
return self.__str__()
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :list[float] = [0] * self.degree
for i in range(self.degree ):
_lowerCAmelCase :Tuple = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int | float = 0 ):
_lowerCAmelCase :list[float] = [0] * (self.degree + 2)
_lowerCAmelCase :str = constant
for i in range(self.degree + 1 ):
_lowerCAmelCase :List[str] = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , _UpperCAmelCase )
def __eq__( self: List[Any] , _UpperCAmelCase: object ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self: Optional[Any] , _UpperCAmelCase: object ):
return not self.__eq__(_UpperCAmelCase ) | 687 | 1 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
a = (
"""This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"""
)
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : List[str] ):
"""simple docstring"""
warnings.warn(__magic_name__ , __magic_name__ )
requires_backends(__magic_name__ , 'sklearn' )
return (preds == labels).mean()
def UpperCamelCase_( __magic_name__ : List[str] , __magic_name__ : Dict ):
"""simple docstring"""
warnings.warn(__magic_name__ , __magic_name__ )
requires_backends(__magic_name__ , 'sklearn' )
_lowerCAmelCase :List[str] = simple_accuracy(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Any = fa_score(y_true=__magic_name__ , y_pred=__magic_name__ )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Any ):
"""simple docstring"""
warnings.warn(__magic_name__ , __magic_name__ )
requires_backends(__magic_name__ , 'sklearn' )
_lowerCAmelCase :Any = pearsonr(__magic_name__ , __magic_name__ )[0]
_lowerCAmelCase :int = spearmanr(__magic_name__ , __magic_name__ )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def UpperCamelCase_( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] ):
"""simple docstring"""
warnings.warn(__magic_name__ , __magic_name__ )
requires_backends(__magic_name__ , 'sklearn' )
assert len(__magic_name__ ) == len(__magic_name__ ), f"""Predictions and labels have mismatched lengths {len(__magic_name__ )} and {len(__magic_name__ )}"""
if task_name == "cola":
return {"mcc": matthews_corrcoef(__magic_name__ , __magic_name__ )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(__magic_name__ , __magic_name__ )}
elif task_name == "mrpc":
return acc_and_fa(__magic_name__ , __magic_name__ )
elif task_name == "sts-b":
return pearson_and_spearman(__magic_name__ , __magic_name__ )
elif task_name == "qqp":
return acc_and_fa(__magic_name__ , __magic_name__ )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(__magic_name__ , __magic_name__ )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(__magic_name__ , __magic_name__ )}
elif task_name == "qnli":
return {"acc": simple_accuracy(__magic_name__ , __magic_name__ )}
elif task_name == "rte":
return {"acc": simple_accuracy(__magic_name__ , __magic_name__ )}
elif task_name == "wnli":
return {"acc": simple_accuracy(__magic_name__ , __magic_name__ )}
elif task_name == "hans":
return {"acc": simple_accuracy(__magic_name__ , __magic_name__ )}
else:
raise KeyError(__magic_name__ )
def UpperCamelCase_( __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : int ):
"""simple docstring"""
warnings.warn(__magic_name__ , __magic_name__ )
requires_backends(__magic_name__ , 'sklearn' )
if len(__magic_name__ ) != len(__magic_name__ ):
raise ValueError(f"""Predictions and labels have mismatched lengths {len(__magic_name__ )} and {len(__magic_name__ )}""" )
if task_name == "xnli":
return {"acc": simple_accuracy(__magic_name__ , __magic_name__ )}
else:
raise KeyError(__magic_name__ ) | 687 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
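# Lazy import structure: the torch/flax submodules listed below are only imported on first attribute access via _LazyModule.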
a = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a = False
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Optional[int] = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_lowerCAmelCase :Dict = 'A painting of a squirrel eating a burger '
_lowerCAmelCase :str = torch.manual_seed(0 )
_lowerCAmelCase :Optional[Any] = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_UpperCAmelCase )
_lowerCAmelCase :List[str] = VersatileDiffusionTextToImagePipeline.from_pretrained(_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_lowerCAmelCase :List[str] = generator.manual_seed(0 )
_lowerCAmelCase :int = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Optional[Any] = VersatileDiffusionTextToImagePipeline.from_pretrained(
'shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_lowerCAmelCase :Dict = 'A painting of a squirrel eating a burger '
_lowerCAmelCase :Any = torch.manual_seed(0 )
_lowerCAmelCase :Tuple = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
_lowerCAmelCase :str = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase :List[str] = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 687 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : float | Decimal , __magic_name__ : float = 10**-10 ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = a
while True:
_lowerCAmelCase :str = Decimal(__magic_name__ ) - (
Decimal(eval(__magic_name__ ) ) / Decimal(eval(str(diff(__magic_name__ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(__magic_name__ ) ) < precision: # noqa: S307
return float(__magic_name__ )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
    # Find root of logarithmic function (log(x) - 1 = 0, i.e. x = e)
print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''') | 687 | 1 |
from collections.abc import Generator
from math import sin
def UpperCamelCase_( __magic_name__ : bytes ):
"""simple docstring"""
if len(__magic_name__ ) != 32:
raise ValueError('Input must be of length 32' )
_lowerCAmelCase :Tuple = b''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
_lowerCAmelCase :Dict = format(__magic_name__ , '08x' )[-8:]
_lowerCAmelCase :Any = b''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def UpperCamelCase_( __magic_name__ : bytes ):
"""simple docstring"""
_lowerCAmelCase :str = b''
for char in message:
bit_string += format(__magic_name__ , '08b' ).encode('utf-8' )
_lowerCAmelCase :Union[str, Any] = format(len(__magic_name__ ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__magic_name__ ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def UpperCamelCase_( __magic_name__ : bytes ):
"""simple docstring"""
if len(__magic_name__ ) % 512 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(__magic_name__ ) , 512 ):
_lowerCAmelCase :Optional[int] = bit_string[pos : pos + 512]
_lowerCAmelCase :Tuple = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
_lowerCAmelCase :Union[str, Any] = format(__magic_name__ , '032b' )
_lowerCAmelCase :Dict = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__magic_name__ , 2 )
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : int ):
"""simple docstring"""
return (a + b) % 2**32
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def UpperCamelCase_( __magic_name__ : bytes ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = preprocess(__magic_name__ )
_lowerCAmelCase :int = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
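    # The 64 standard MD5 round constants: floor(2**32 * |sin(i + 1)|) for i in 0..63.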
# Starting states
_lowerCAmelCase :Tuple = 0X67452301
_lowerCAmelCase :List[Any] = 0Xefcdab89
_lowerCAmelCase :List[str] = 0X98badcfe
_lowerCAmelCase :List[str] = 0X10325476
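    # Per-round left-rotation amounts: four rounds of 16 steps each, as specified in RFC 1321.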
    _lowerCAmelCase :List[Any] = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__magic_name__ ):
_lowerCAmelCase :Optional[int] = aa
_lowerCAmelCase :Optional[int] = ba
_lowerCAmelCase :Tuple = ca
_lowerCAmelCase :Union[str, Any] = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_lowerCAmelCase :Any = d ^ (b & (c ^ d))
_lowerCAmelCase :int = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_lowerCAmelCase :Optional[Any] = c ^ (d & (b ^ c))
_lowerCAmelCase :Dict = (5 * i + 1) % 16
elif i <= 47:
_lowerCAmelCase :Optional[int] = b ^ c ^ d
_lowerCAmelCase :List[str] = (3 * i + 5) % 16
else:
_lowerCAmelCase :Any = c ^ (b | not_aa(__magic_name__ ))
_lowerCAmelCase :Any = (7 * i) % 16
_lowerCAmelCase :Dict = (f + a + added_consts[i] + block_words[g]) % 2**32
_lowerCAmelCase :List[Any] = d
_lowerCAmelCase :Tuple = c
_lowerCAmelCase :Any = b
_lowerCAmelCase :Union[str, Any] = sum_aa(__magic_name__ , left_rotate_aa(__magic_name__ , shift_amounts[i] ) )
# Add hashed chunk to running total
_lowerCAmelCase :Optional[Any] = sum_aa(__magic_name__ , __magic_name__ )
_lowerCAmelCase :List[Any] = sum_aa(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Optional[int] = sum_aa(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Tuple = sum_aa(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Tuple = reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod() | 687 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
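# Conversion script: maps an OpenAI consistency-model checkpoint (unet.pt) onto the diffusers UNet layout
# and bundles it with a CMStochasticIterativeScheduler into a ConsistencyModelPipeline.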
a = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""sample_size""": 256,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
a = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
a = {
"""num_train_timesteps""": 151,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
def UpperCamelCase_( __magic_name__ : Dict ):
"""simple docstring"""
if isinstance(__magic_name__ , __magic_name__ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[Any]=False ):
"""simple docstring"""
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.in_layers.0.weight"""]
_lowerCAmelCase :Union[str, Any] = checkpoint[f"""{old_prefix}.in_layers.0.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.in_layers.2.weight"""]
_lowerCAmelCase :Optional[Any] = checkpoint[f"""{old_prefix}.in_layers.2.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""]
_lowerCAmelCase :Any = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.out_layers.0.weight"""]
_lowerCAmelCase :List[Any] = checkpoint[f"""{old_prefix}.out_layers.0.bias"""]
_lowerCAmelCase :Optional[int] = checkpoint[f"""{old_prefix}.out_layers.3.weight"""]
_lowerCAmelCase :Dict = checkpoint[f"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
_lowerCAmelCase :List[Any] = checkpoint[f"""{old_prefix}.skip_connection.weight"""]
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : List[str] , __magic_name__ : List[str]=None ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Tuple = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Any = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.norm.weight"""]
_lowerCAmelCase :Dict = checkpoint[f"""{old_prefix}.norm.bias"""]
_lowerCAmelCase :Dict = weight_q.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :str = bias_q.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :List[str] = weight_k.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :Optional[Any] = bias_k.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :Tuple = weight_v.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :List[Any] = bias_v.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :int = (
checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
_lowerCAmelCase :Optional[Any] = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Optional[Any] ):
"""simple docstring"""
_lowerCAmelCase :Union[str, Any] = torch.load(__magic_name__ , map_location='cpu' )
_lowerCAmelCase :List[Any] = {}
_lowerCAmelCase :List[str] = checkpoint['time_embed.0.weight']
_lowerCAmelCase :Tuple = checkpoint['time_embed.0.bias']
_lowerCAmelCase :Dict = checkpoint['time_embed.2.weight']
_lowerCAmelCase :Union[str, Any] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
_lowerCAmelCase :Union[str, Any] = checkpoint['label_emb.weight']
_lowerCAmelCase :str = checkpoint['input_blocks.0.0.weight']
_lowerCAmelCase :str = checkpoint['input_blocks.0.0.bias']
_lowerCAmelCase :List[Any] = unet_config['down_block_types']
_lowerCAmelCase :Any = unet_config['layers_per_block']
_lowerCAmelCase :List[Any] = unet_config['attention_head_dim']
_lowerCAmelCase :Tuple = unet_config['block_out_channels']
_lowerCAmelCase :List[str] = 1
_lowerCAmelCase :Optional[int] = channels_list[0]
for i, layer_type in enumerate(__magic_name__ ):
_lowerCAmelCase :Tuple = channels_list[i]
_lowerCAmelCase :Optional[Any] = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(__magic_name__ ):
_lowerCAmelCase :int = f"""down_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :List[Any] = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :int = True if j == 0 and downsample_block_has_skip else False
_lowerCAmelCase :List[Any] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(__magic_name__ ):
_lowerCAmelCase :List[str] = f"""down_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :Optional[int] = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :List[str] = True if j == 0 and downsample_block_has_skip else False
_lowerCAmelCase :Optional[int] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
_lowerCAmelCase :Optional[int] = f"""down_blocks.{i}.attentions.{j}"""
_lowerCAmelCase :str = f"""input_blocks.{current_layer}.1"""
_lowerCAmelCase :Optional[Any] = convert_attention(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Union[str, Any] = f"""down_blocks.{i}.downsamplers.0"""
_lowerCAmelCase :Tuple = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :Optional[int] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
_lowerCAmelCase :Dict = current_channels
# hardcoded the mid-block for now
_lowerCAmelCase :int = 'mid_block.resnets.0'
_lowerCAmelCase :Optional[Any] = 'middle_block.0'
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Optional[int] = 'mid_block.attentions.0'
_lowerCAmelCase :Optional[int] = 'middle_block.1'
_lowerCAmelCase :List[Any] = convert_attention(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Union[str, Any] = 'mid_block.resnets.1'
_lowerCAmelCase :Optional[int] = 'middle_block.2'
_lowerCAmelCase :int = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Tuple = 0
_lowerCAmelCase :str = unet_config['up_block_types']
for i, layer_type in enumerate(__magic_name__ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCAmelCase :Optional[Any] = f"""up_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :Dict = f"""output_blocks.{current_layer}.0"""
_lowerCAmelCase :Any = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Any = f"""up_blocks.{i}.upsamplers.0"""
_lowerCAmelCase :Dict = f"""output_blocks.{current_layer-1}.1"""
_lowerCAmelCase :Tuple = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCAmelCase :Tuple = f"""up_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :List[str] = f"""output_blocks.{current_layer}.0"""
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
_lowerCAmelCase :str = f"""up_blocks.{i}.attentions.{j}"""
_lowerCAmelCase :List[Any] = f"""output_blocks.{current_layer}.1"""
_lowerCAmelCase :int = convert_attention(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Optional[int] = f"""up_blocks.{i}.upsamplers.0"""
_lowerCAmelCase :int = f"""output_blocks.{current_layer-1}.2"""
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :str = checkpoint['out.0.weight']
_lowerCAmelCase :Union[str, Any] = checkpoint['out.0.bias']
_lowerCAmelCase :List[Any] = checkpoint['out.2.weight']
_lowerCAmelCase :Dict = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
a = parser.parse_args()
a = strabool(args.class_cond)
a = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
a = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
a = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
a = None
a = con_pt_to_diffuser(args.unet_path, unet_config)
a = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
a = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
a = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
a = CMStochasticIterativeScheduler(**scheduler_config)
a = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path) | 687 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Dict = 'encoder-decoder'
lowerCamelCase : Optional[Any] = True
def __init__( self: str , **_UpperCAmelCase: int ):
super().__init__(**_UpperCAmelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
_lowerCAmelCase :Optional[Any] = kwargs.pop('encoder' )
_lowerCAmelCase :Dict = encoder_config.pop('model_type' )
_lowerCAmelCase :str = kwargs.pop('decoder' )
_lowerCAmelCase :str = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
_lowerCAmelCase :str = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Tuple = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Any = True
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls: Tuple , _UpperCAmelCase: PretrainedConfig , _UpperCAmelCase: PretrainedConfig , **_UpperCAmelCase: str ):
logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
_lowerCAmelCase :Dict = True
_lowerCAmelCase :List[str] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowerCAmelCase :Optional[int] = self.encoder.to_dict()
_lowerCAmelCase :Union[str, Any] = self.decoder.to_dict()
_lowerCAmelCase :List[str] = self.__class__.model_type
return output | 687 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
a = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Optional[Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
_lowerCAmelCase :Tuple = self.diffusers_dir
shutil.copy(
os.path.join(_UpperCAmelCase , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :str = 'src/diffusers'
shutil.rmtree(self.diffusers_dir )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=None ):
_lowerCAmelCase :int = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_lowerCAmelCase :Dict = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_lowerCAmelCase :Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_lowerCAmelCase :List[str] = black.format_str(_UpperCAmelCase , mode=_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = os.path.join(self.diffusers_dir , 'new_code.py' )
with open(_UpperCAmelCase , 'w' , newline='\n' ) as f:
f.write(_UpperCAmelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_UpperCAmelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_UpperCAmelCase )
with open(_UpperCAmelCase , 'r' ) as f:
self.assertTrue(f.read() , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :List[str] = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
# Base copy consistency
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , _UpperCAmelCase , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , _UpperCAmelCase ) , )
# Copy consistency with a really long name
_lowerCAmelCase :Optional[int] = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , _UpperCAmelCase , _UpperCAmelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , _UpperCAmelCase , overwrite_result=re.sub('DDPM' , 'Test' , _UpperCAmelCase ) , ) | 687 | 1 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
a = """scheduler_config.json"""
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : List[str] = 1
lowerCamelCase : Any = 2
lowerCamelCase : str = 3
lowerCamelCase : Tuple = 4
lowerCamelCase : Dict = 5
lowerCamelCase : str = 6
lowerCamelCase : Dict = 7
lowerCamelCase : str = 8
lowerCamelCase : Optional[Any] = 9
lowerCamelCase : Optional[int] = 10
lowerCamelCase : int = 11
lowerCamelCase : Union[str, Any] = 12
lowerCamelCase : str = 13
lowerCamelCase : Optional[Any] = 14
@dataclass
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : torch.FloatTensor
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Tuple = SCHEDULER_CONFIG_NAME
lowerCamelCase : Dict = []
lowerCamelCase : Dict = True
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls: List[Any] , _UpperCAmelCase: Dict[str, Any] = None , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Tuple=False , **_UpperCAmelCase: int , ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Union[str, Any] = cls.load_config(
pretrained_model_name_or_path=_UpperCAmelCase , subfolder=_UpperCAmelCase , return_unused_kwargs=_UpperCAmelCase , return_commit_hash=_UpperCAmelCase , **_UpperCAmelCase , )
return cls.from_config(_UpperCAmelCase , return_unused_kwargs=_UpperCAmelCase , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Union[str, os.PathLike] , _UpperCAmelCase: bool = False , **_UpperCAmelCase: Union[str, Any] ):
self.save_config(save_directory=_UpperCAmelCase , push_to_hub=_UpperCAmelCase , **_UpperCAmelCase )
@property
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
return self._get_compatibles()
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls: Dict ):
_lowerCAmelCase :List[str] = list(set([cls.__name__] + cls._compatibles ) )
_lowerCAmelCase :Any = importlib.import_module(__name__.split('.' )[0] )
_lowerCAmelCase :Any = [
getattr(_UpperCAmelCase , _UpperCAmelCase ) for c in compatible_classes_str if hasattr(_UpperCAmelCase , _UpperCAmelCase )
]
return compatible_classes | 687 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} )
lowerCamelCase : Optional[str] = field(
default='./' , metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for training.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for evaluation.'} )
lowerCamelCase : Optional[float] = field(default=0.1 , metadata={'help': 'Value of weight decay.'} )
lowerCamelCase : Optional[int] = field(
default=1_00_00 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
    lowerCamelCase : Optional[float] = field(default=2e-4 , metadata={'help': 'Learning rate for training.'} )
    lowerCamelCase : Optional[str] = field(default='cosine' , metadata={'help': 'Learning rate scheduler type.'} )
lowerCamelCase : Optional[int] = field(
default=7_50 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
lowerCamelCase : Optional[int] = field(
default=16 , metadata={'help': 'Number of gradient accumulation steps.'} )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
lowerCamelCase : Optional[int] = field(default=5_00_00 , metadata={'help': 'Maximum number of training steps.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Sequence lengths used for training.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Training seed.'} )
lowerCamelCase : Optional[int] = field(
default=10_24 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} , )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Length of sequences to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for code evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Sample from the language model\'s output distribution.'} )
lowerCamelCase : Optional[float] = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} )
lowerCamelCase : Optional[int] = field(default=2_56 , metadata={'help': 'Maximum number of newly generated tokens.'} )
lowerCamelCase : Optional[int] = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} )
lowerCamelCase : Optional[float] = field(default=0.95 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
lowerCamelCase : Optional[int] = field(default=10 , metadata={'help': 'Number of generations to run in parallel.'} )
lowerCamelCase : Optional[int] = field(
default=2_00 , metadata={'help': 'Number of completions to generate for each sample.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
lowerCamelCase : Optional[str] = field(
        default='eval_results.json' , metadata={'help': 'File in which to save the evaluation results.'} )
lowerCamelCase : Optional[str] = field(
default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} , )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} , )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} )
lowerCamelCase : Optional[str] = field(
        default='codeparrot-clean' , metadata={'help': 'Folder to save the processed dataset.'} )
lowerCamelCase : Optional[int] = field(
default=10_00_00 , metadata={'help': 'Number of files to save per JSON output file.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[float] = field(
default=10_00 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1_00 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.25 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1.5 , metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'If True, near-duplicate samples are removed.'} )
lowerCamelCase : Optional[float] = field(
default=0.85 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[int] = field(default=20_00_00 , metadata={'help': 'Number of examples to train tokenizer on.'} )
lowerCamelCase : Optional[int] = field(
        default=3_27_68 , metadata={'help': 'Vocabulary size of the new tokenizer.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} )
lowerCamelCase : Optional[str] = field(
default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} )
    lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for pretokenization.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} ) | 687 | 1 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def UpperCamelCase_( __magic_name__ : Dataset , __magic_name__ : Dict[str, str] ):
"""simple docstring"""
_lowerCAmelCase :Dict = args.log_outputs
_lowerCAmelCase :Optional[int] = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
_lowerCAmelCase :List[Any] = load_metric('wer' )
_lowerCAmelCase :str = load_metric('cer' )
# compute metrics
_lowerCAmelCase :Optional[Any] = wer.compute(references=result['target'] , predictions=result['prediction'] )
_lowerCAmelCase :Union[str, Any] = cer.compute(references=result['target'] , predictions=result['prediction'] )
# print & log results
_lowerCAmelCase :Optional[Any] = f"""WER: {wer_result}\nCER: {cer_result}"""
print(__magic_name__ )
with open(f"""{dataset_id}_eval_results.txt""" , 'w' ) as f:
f.write(__magic_name__ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
_lowerCAmelCase :Union[str, Any] = f"""log_{dataset_id}_predictions.txt"""
_lowerCAmelCase :Optional[Any] = f"""log_{dataset_id}_targets.txt"""
with open(__magic_name__ , 'w' ) as p, open(__magic_name__ , 'w' ) as t:
# mapping function to write output
def write_to_file(__magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] ):
p.write(f"""{i}""" + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(f"""{i}""" + '\n' )
t.write(batch['target'] + '\n' )
result.map(__magic_name__ , with_indices=__magic_name__ )
def UpperCamelCase_( __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :Any = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
_lowerCAmelCase :str = re.sub(__magic_name__ , '' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
_lowerCAmelCase :Optional[Any] = ['\n\n', '\n', ' ', ' ']
for t in token_sequences_to_ignore:
_lowerCAmelCase :List[Any] = ' '.join(text.split(__magic_name__ ) )
return text
def UpperCamelCase_( __magic_name__ : Any ):
"""simple docstring"""
_lowerCAmelCase :int = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=__magic_name__ )
    # for testing: only process the first few examples as a test
# dataset = dataset.select(range(10))
# load processor
_lowerCAmelCase :List[str] = AutoFeatureExtractor.from_pretrained(args.model_id )
_lowerCAmelCase :Optional[Any] = feature_extractor.sampling_rate
# resample audio
_lowerCAmelCase :Union[str, Any] = dataset.cast_column('audio' , Audio(sampling_rate=__magic_name__ ) )
# load eval pipeline
if args.device is None:
_lowerCAmelCase :str = 0 if torch.cuda.is_available() else -1
_lowerCAmelCase :int = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(__magic_name__ : Tuple ):
_lowerCAmelCase :Tuple = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
_lowerCAmelCase :List[str] = prediction['text']
_lowerCAmelCase :Any = normalize_text(batch['sentence'] )
return batch
# run inference on all examples
_lowerCAmelCase :List[Any] = dataset.map(__magic_name__ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(__magic_name__ , __magic_name__ )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
a = parser.parse_args()
main(args) | 687 |
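# --- Hedged usage sketch (added; not from the original script) -------------
# An example invocation of the evaluation script above; the model and
# dataset ids are assumptions for illustration only:
#   python eval.py --model_id jonatasgrosman/wav2vec2-large-xlsr-53-english \
#       --dataset mozilla-foundation/common_voice_8_0 --config en --split test
# Minimal standalone check of the WER metric the script loads:
from datasets import load_metric

if __name__ == "__main__":
    wer_metric = load_metric('wer')
    # one substitution over two reference words -> WER of 0.5
    print(wer_metric.compute(predictions=['hello world'], references=['hello word']))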
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :List[str] = 'ylacombe/bark-small'
_lowerCAmelCase :int = tempfile.mkdtemp()
_lowerCAmelCase :List[str] = 'en_speaker_1'
_lowerCAmelCase :Union[str, Any] = 'This is a test string'
_lowerCAmelCase :List[Any] = 'speaker_embeddings_path.json'
_lowerCAmelCase :str = 'speaker_embeddings'
def SCREAMING_SNAKE_CASE__ ( self: str , **_UpperCAmelCase: Optional[Any] ):
return AutoTokenizer.from_pretrained(self.checkpoint , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :List[Any] = self.get_tokenizer()
_lowerCAmelCase :List[str] = BarkProcessor(tokenizer=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase :List[str] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_lowerCAmelCase :Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_lowerCAmelCase :Any = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_lowerCAmelCase :List[Any] = 35
_lowerCAmelCase :Optional[int] = 2
_lowerCAmelCase :Dict = 8
_lowerCAmelCase :Dict = {
'semantic_prompt': np.ones(_UpperCAmelCase ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_lowerCAmelCase :int = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_lowerCAmelCase :Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Tuple = self.get_tokenizer()
_lowerCAmelCase :Union[str, Any] = BarkProcessor(tokenizer=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = processor(text=self.input_string )
_lowerCAmelCase :List[str] = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() ) | 687 | 1 |
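# --- Hedged sketch (added; not part of the original tests) -----------------
# Round-trips a Bark-style voice preset through np.savez/np.load, mirroring
# the `file.npz` test above; the array shapes reuse the same illustrative
# values (seq_len=35, 2 coarse codebooks, 8 total codebooks).
import os
import tempfile

import numpy as np

if __name__ == "__main__":
    preset = {
        'semantic_prompt': np.ones(35),
        'coarse_prompt': np.ones((2, 35)),
        'fine_prompt': np.ones((8, 35)),
    }
    path = os.path.join(tempfile.mkdtemp(), 'voice_preset.npz')
    np.savez(path, **preset)
    loaded = np.load(path)  # NpzFile, indexable by key
    assert all(np.array_equal(preset[k], loaded[k]) for k in preset)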
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
    if isinstance(__magic_name__ , float ):
        raise TypeError('\'float\' object cannot be interpreted as an integer' )
    if isinstance(__magic_name__ , str ):
        raise TypeError('\'str\' object cannot be interpreted as an integer' )
if num == 0:
return "0b0"
_lowerCAmelCase :Dict = False
if num < 0:
_lowerCAmelCase :Optional[Any] = True
_lowerCAmelCase :Optional[Any] = -num
_lowerCAmelCase :list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(__magic_name__ ) for e in binary )
return "0b" + "".join(str(__magic_name__ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod() | 687 |
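# --- Hedged sketch (added): cross-check of the converter above against the
# standard library; `_binary_reference` is an illustrative helper name.
def _binary_reference(num: int) -> str:
    sign = '-' if num < 0 else ''
    return sign + format(abs(num), '#b')  # '#b' yields the '0b' prefix

if __name__ == "__main__":
    assert _binary_reference(0) == '0b0'
    assert _binary_reference(40) == '0b101000'
    assert _binary_reference(-40) == '-0b101000'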
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : int = 'bert'
def __init__( self: Optional[Any] , _UpperCAmelCase: Tuple=3_0522 , _UpperCAmelCase: int=768 , _UpperCAmelCase: Union[str, Any]=12 , _UpperCAmelCase: Dict=12 , _UpperCAmelCase: List[Any]=3072 , _UpperCAmelCase: List[Any]="gelu" , _UpperCAmelCase: Union[str, Any]=0.1 , _UpperCAmelCase: Dict=0.1 , _UpperCAmelCase: List[Any]=512 , _UpperCAmelCase: Optional[Any]=2 , _UpperCAmelCase: Optional[int]=0.0_2 , _UpperCAmelCase: Any=1e-1_2 , _UpperCAmelCase: Optional[Any]=0 , _UpperCAmelCase: Union[str, Any]="absolute" , _UpperCAmelCase: Dict=True , _UpperCAmelCase: Optional[Any]=None , **_UpperCAmelCase: Optional[int] , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :List[Any] = vocab_size
_lowerCAmelCase :Tuple = hidden_size
_lowerCAmelCase :Dict = num_hidden_layers
_lowerCAmelCase :Optional[Any] = num_attention_heads
_lowerCAmelCase :List[Any] = hidden_act
_lowerCAmelCase :int = intermediate_size
_lowerCAmelCase :Tuple = hidden_dropout_prob
_lowerCAmelCase :Tuple = attention_probs_dropout_prob
_lowerCAmelCase :List[Any] = max_position_embeddings
_lowerCAmelCase :Dict = type_vocab_size
_lowerCAmelCase :Any = initializer_range
_lowerCAmelCase :int = layer_norm_eps
_lowerCAmelCase :List[Any] = position_embedding_type
_lowerCAmelCase :int = use_cache
_lowerCAmelCase :Union[str, Any] = classifier_dropout
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
if self.task == "multiple-choice":
_lowerCAmelCase :List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase :Any = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] ) | 687 | 1 |
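# --- Hedged usage sketch (added): the upstream equivalent of the config
# class above can be instantiated directly; the values shown are its
# documented defaults.
from transformers import BertConfig

if __name__ == "__main__":
    bert_config = BertConfig(hidden_size=768, num_hidden_layers=12, num_attention_heads=12)
    print(bert_config.vocab_size)  # 30522 by default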
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a = logging.get_logger(__name__)
a = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
a = {
"""tokenizer_file""": {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json""",
},
}
a = {
"""gpt-neox-20b""": 2_048,
}
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : int = VOCAB_FILES_NAMES
lowerCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : List[Any] = ['input_ids', 'attention_mask']
def __init__( self: str , _UpperCAmelCase: Optional[int]=None , _UpperCAmelCase: Tuple=None , _UpperCAmelCase: Tuple=None , _UpperCAmelCase: Dict="<|endoftext|>" , _UpperCAmelCase: Any="<|endoftext|>" , _UpperCAmelCase: str="<|endoftext|>" , _UpperCAmelCase: Tuple=False , **_UpperCAmelCase: Tuple , ):
super().__init__(
_UpperCAmelCase , _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , unk_token=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , )
_lowerCAmelCase :Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , _UpperCAmelCase ) != add_prefix_space:
_lowerCAmelCase :Optional[int] = getattr(_UpperCAmelCase , pre_tok_state.pop('type' ) )
_lowerCAmelCase :Optional[Any] = add_prefix_space
_lowerCAmelCase :List[Any] = pre_tok_class(**_UpperCAmelCase )
_lowerCAmelCase :List[str] = add_prefix_space
def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: str , _UpperCAmelCase: Optional[str] = None ):
_lowerCAmelCase :Union[str, Any] = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: "Conversation" ):
_lowerCAmelCase :List[str] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) + [self.eos_token_id] )
if len(_UpperCAmelCase ) > self.model_max_length:
_lowerCAmelCase :Dict = input_ids[-self.model_max_length :]
return input_ids | 687 |
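# --- Hedged usage sketch (added): loading the fast tokenizer above from the
# checkpoint referenced in PRETRAINED_VOCAB_FILES_MAP (network access assumed).
from transformers import AutoTokenizer

if __name__ == "__main__":
    neox_tok = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b')
    print(neox_tok('def hello():', add_special_tokens=False).input_ids)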
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ):
"""simple docstring"""
if isinstance(__magic_name__ , torch.Tensor ):
return image
elif isinstance(__magic_name__ , PIL.Image.Image ):
_lowerCAmelCase :Tuple = [image]
if isinstance(image[0] , PIL.Image.Image ):
_lowerCAmelCase :List[Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
_lowerCAmelCase :Optional[Any] = np.concatenate(__magic_name__ , axis=0 )
_lowerCAmelCase :Any = np.array(__magic_name__ ).astype(np.floataa ) / 255.0
_lowerCAmelCase :Optional[int] = image.transpose(0 , 3 , 1 , 2 )
_lowerCAmelCase :int = 2.0 * image - 1.0
_lowerCAmelCase :Optional[int] = torch.from_numpy(__magic_name__ )
elif isinstance(image[0] , torch.Tensor ):
_lowerCAmelCase :str = torch.cat(__magic_name__ , dim=0 )
return image
def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : int=0.9995 ):
"""simple docstring"""
if not isinstance(__magic_name__ , np.ndarray ):
_lowerCAmelCase :Tuple = True
_lowerCAmelCase :str = va.device
_lowerCAmelCase :List[str] = va.cpu().numpy()
_lowerCAmelCase :List[str] = va.cpu().numpy()
_lowerCAmelCase :Any = np.sum(va * va / (np.linalg.norm(__magic_name__ ) * np.linalg.norm(__magic_name__ )) )
if np.abs(__magic_name__ ) > DOT_THRESHOLD:
_lowerCAmelCase :Optional[Any] = (1 - t) * va + t * va
else:
_lowerCAmelCase :int = np.arccos(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = np.sin(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = theta_a * t
_lowerCAmelCase :str = np.sin(__magic_name__ )
_lowerCAmelCase :Any = np.sin(theta_a - theta_t ) / sin_theta_a
_lowerCAmelCase :Optional[Any] = sin_theta_t / sin_theta_a
_lowerCAmelCase :List[Any] = sa * va + sa * va
if inputs_are_torch:
_lowerCAmelCase :int = torch.from_numpy(__magic_name__ ).to(__magic_name__ )
return va
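# --- Hedged sketch (added): the intent of the slerp helper above, written
# out standalone for two 1-D numpy vectors; `slerp_np` is an illustrative
# name, not one used elsewhere in this pipeline.
def slerp_np(t: float, v0: np.ndarray, v1: np.ndarray, dot_threshold: float = 0.9995) -> np.ndarray:
    dot = np.sum(v0 * v1) / (np.linalg.norm(v0) * np.linalg.norm(v1))
    if np.abs(dot) > dot_threshold:
        # nearly parallel: plain linear interpolation is numerically safer
        return (1 - t) * v0 + t * v1
    theta = np.arccos(dot)
    return (np.sin((1 - t) * theta) * v0 + np.sin(t * theta) * v1) / np.sin(theta)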
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase :Any = F.normalize(__magic_name__ , dim=-1 )
_lowerCAmelCase :str = F.normalize(__magic_name__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
for param in model.parameters():
_lowerCAmelCase :List[str] = value
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def __init__( self: Any , _UpperCAmelCase: AutoencoderKL , _UpperCAmelCase: CLIPTextModel , _UpperCAmelCase: CLIPModel , _UpperCAmelCase: CLIPTokenizer , _UpperCAmelCase: UNetaDConditionModel , _UpperCAmelCase: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , _UpperCAmelCase: CLIPFeatureExtractor , _UpperCAmelCase: str=None , _UpperCAmelCase: Tuple=None , _UpperCAmelCase: Union[str, Any]=None , ):
super().__init__()
self.register_modules(
vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , clip_model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , coca_model=_UpperCAmelCase , coca_tokenizer=_UpperCAmelCase , coca_transform=_UpperCAmelCase , )
_lowerCAmelCase :int = (
feature_extractor.size
if isinstance(feature_extractor.size , _UpperCAmelCase )
else feature_extractor.size['shortest_edge']
)
_lowerCAmelCase :Union[str, Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _UpperCAmelCase )
set_requires_grad(self.clip_model , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase :Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
self.enable_attention_slicing(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
set_requires_grad(self.vae , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
set_requires_grad(self.vae , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
set_requires_grad(self.unet , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
set_requires_grad(self.unet , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Dict ):
# get the original timestep using init_timestep
_lowerCAmelCase :Optional[Any] = min(int(num_inference_steps * strength ) , _UpperCAmelCase )
_lowerCAmelCase :List[str] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase :Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Union[str, Any]=None ):
if not isinstance(_UpperCAmelCase , torch.Tensor ):
raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(_UpperCAmelCase )}""" )
_lowerCAmelCase :Union[str, Any] = image.to(device=_UpperCAmelCase , dtype=_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :List[Any] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCAmelCase )
]
_lowerCAmelCase :List[str] = torch.cat(_UpperCAmelCase , dim=0 )
else:
_lowerCAmelCase :List[str] = self.vae.encode(_UpperCAmelCase ).latent_dist.sample(_UpperCAmelCase )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase :List[Any] = 0.1_8_2_1_5 * init_latents
_lowerCAmelCase :List[Any] = init_latents.repeat_interleave(_UpperCAmelCase , dim=0 )
_lowerCAmelCase :Dict = randn_tensor(init_latents.shape , generator=_UpperCAmelCase , device=_UpperCAmelCase , dtype=_UpperCAmelCase )
# get latents
_lowerCAmelCase :Dict = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :List[str] = init_latents
return latents
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Union[str, Any] ):
_lowerCAmelCase :Optional[int] = self.coca_transform(_UpperCAmelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_lowerCAmelCase :Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_lowerCAmelCase :int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: List[str] ):
_lowerCAmelCase :Optional[int] = self.feature_extractor.preprocess(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
_lowerCAmelCase :List[str] = self.clip_model.get_image_features(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase )
_lowerCAmelCase :Dict = image_embeddings_clip.repeat_interleave(_UpperCAmelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , _UpperCAmelCase: Dict , _UpperCAmelCase: str , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , ):
_lowerCAmelCase :Dict = latents.detach().requires_grad_()
_lowerCAmelCase :Optional[Any] = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
_lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_lowerCAmelCase :int = self.scheduler.alphas_cumprod[timestep]
_lowerCAmelCase :Optional[int] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase :str = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_lowerCAmelCase :Optional[Any] = torch.sqrt(_UpperCAmelCase )
_lowerCAmelCase :List[str] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _UpperCAmelCase ):
_lowerCAmelCase :Dict = self.scheduler.sigmas[index]
_lowerCAmelCase :Optional[Any] = latents - sigma * noise_pred
else:
raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase :Tuple = 1 / 0.1_8_2_1_5 * sample
_lowerCAmelCase :Optional[Any] = self.vae.decode(_UpperCAmelCase ).sample
_lowerCAmelCase :List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase :Tuple = transforms.Resize(self.feature_extractor_size )(_UpperCAmelCase )
_lowerCAmelCase :Tuple = self.normalize(_UpperCAmelCase ).to(latents.dtype )
_lowerCAmelCase :List[Any] = self.clip_model.get_image_features(_UpperCAmelCase )
_lowerCAmelCase :List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase )
_lowerCAmelCase :Tuple = spherical_dist_loss(_UpperCAmelCase , _UpperCAmelCase ).mean() * clip_guidance_scale
_lowerCAmelCase :str = -torch.autograd.grad(_UpperCAmelCase , _UpperCAmelCase )[0]
if isinstance(self.scheduler , _UpperCAmelCase ):
_lowerCAmelCase :Union[str, Any] = latents.detach() + grads * (sigma**2)
_lowerCAmelCase :Dict = noise_pred_original
else:
_lowerCAmelCase :Optional[int] = noise_pred_original - torch.sqrt(_UpperCAmelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self: Optional[int] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: float = 0.6 , _UpperCAmelCase: Optional[int] = 50 , _UpperCAmelCase: Optional[float] = 7.5 , _UpperCAmelCase: Optional[int] = 1 , _UpperCAmelCase: float = 0.0 , _UpperCAmelCase: Optional[float] = 100 , _UpperCAmelCase: Optional[torch.Generator] = None , _UpperCAmelCase: Optional[str] = "pil" , _UpperCAmelCase: bool = True , _UpperCAmelCase: float = 0.8 , _UpperCAmelCase: float = 0.1 , _UpperCAmelCase: float = 0.1 , ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(_UpperCAmelCase )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(_UpperCAmelCase , torch.Generator ) and batch_size > 1:
_lowerCAmelCase :int = [generator] + [None] * (batch_size - 1)
_lowerCAmelCase :List[Any] = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
_lowerCAmelCase :Optional[int] = [x[0] for x in coca_is_none if x[1]]
_lowerCAmelCase :List[str] = ', '.join(_UpperCAmelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :List[Any] = self.get_image_description(_UpperCAmelCase )
if style_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :Any = self.get_image_description(_UpperCAmelCase )
# get prompt text embeddings for content and style
_lowerCAmelCase :Any = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :str = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :int = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :Union[str, Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :List[str] = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# duplicate text embeddings for each generation per prompt
_lowerCAmelCase :str = text_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 )
# set timesteps
_lowerCAmelCase :Any = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_lowerCAmelCase :Dict = {}
if accepts_offset:
_lowerCAmelCase :Optional[int] = 1
self.scheduler.set_timesteps(_UpperCAmelCase , **_UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_lowerCAmelCase , _lowerCAmelCase :List[str] = self.get_timesteps(_UpperCAmelCase , _UpperCAmelCase , self.device )
_lowerCAmelCase :int = timesteps[:1].repeat(_UpperCAmelCase )
# Preprocess image
_lowerCAmelCase :Dict = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :int = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :Any = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :str = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if clip_guidance_scale > 0:
_lowerCAmelCase :Optional[Any] = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Dict = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Any = slerp(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase :int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase :Optional[int] = content_text_input.input_ids.shape[-1]
_lowerCAmelCase :Union[str, Any] = self.tokenizer([''] , padding='max_length' , max_length=_UpperCAmelCase , return_tensors='pt' )
_lowerCAmelCase :Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_lowerCAmelCase :Optional[int] = uncond_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase :int = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase :Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase :Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowerCAmelCase :Any = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device='cpu' , dtype=_UpperCAmelCase ).to(
self.device )
else:
_lowerCAmelCase :List[Any] = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
_lowerCAmelCase :int = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase :Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase :Any = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase :Any = {}
if accepts_eta:
_lowerCAmelCase :Any = eta
# check if the scheduler accepts generator
_lowerCAmelCase :List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_lowerCAmelCase :List[Any] = generator
with self.progress_bar(total=_UpperCAmelCase ):
for i, t in enumerate(_UpperCAmelCase ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase :Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase :Tuple = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
_lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase :List[str] = noise_pred.chunk(2 )
_lowerCAmelCase :Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_lowerCAmelCase :List[Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_lowerCAmelCase , _lowerCAmelCase :List[str] = self.cond_fn(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase :str = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase :str = 1 / 0.1_8_2_1_5 * latents
_lowerCAmelCase :Any = self.vae.decode(_UpperCAmelCase ).sample
_lowerCAmelCase :List[str] = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase :Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCAmelCase :List[Any] = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_UpperCAmelCase , nsfw_content_detected=_UpperCAmelCase ) | 687 | 1 |
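# --- Hedged sketch (added): the classifier-free-guidance update used in the
# denoising loop above, isolated on dummy tensors so the arithmetic is easy
# to inspect.
if __name__ == "__main__":
    _noise_uncond = torch.zeros(1, 4, 8, 8)
    _noise_text = torch.ones(1, 4, 8, 8)
    _guidance_scale = 7.5
    _noise_pred = _noise_uncond + _guidance_scale * (_noise_text - _noise_uncond)
    print(_noise_pred.mean().item())  # 7.5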
import torch
from torch import nn
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
def __init__( self: Any , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: List[str] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[int]=1 , _UpperCAmelCase: Dict=False ):
super().__init__()
_lowerCAmelCase :Dict = n_token
_lowerCAmelCase :Tuple = d_embed
_lowerCAmelCase :str = d_proj
_lowerCAmelCase :int = cutoffs + [n_token]
_lowerCAmelCase :List[str] = [0] + self.cutoffs
_lowerCAmelCase :Any = div_val
_lowerCAmelCase :List[str] = self.cutoffs[0]
_lowerCAmelCase :int = len(self.cutoffs ) - 1
_lowerCAmelCase :int = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
_lowerCAmelCase :Any = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
_lowerCAmelCase :Any = nn.Parameter(torch.zeros(self.n_clusters ) )
_lowerCAmelCase :Optional[int] = nn.ModuleList()
_lowerCAmelCase :Any = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(_UpperCAmelCase , _UpperCAmelCase ) ) )
else:
self.out_projs.append(_UpperCAmelCase )
self.out_layers.append(nn.Linear(_UpperCAmelCase , _UpperCAmelCase ) )
else:
for i in range(len(self.cutoffs ) ):
_lowerCAmelCase , _lowerCAmelCase :Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_lowerCAmelCase :Union[str, Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(_UpperCAmelCase , _UpperCAmelCase ) ) )
self.out_layers.append(nn.Linear(_UpperCAmelCase , r_idx - l_idx ) )
_lowerCAmelCase :Optional[int] = keep_order
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: Any , _UpperCAmelCase: str , _UpperCAmelCase: Dict , _UpperCAmelCase: Dict ):
if proj is None:
_lowerCAmelCase :int = nn.functional.linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
_lowerCAmelCase :List[Any] = nn.functional.linear(_UpperCAmelCase , proj.t().contiguous() )
_lowerCAmelCase :List[Any] = nn.functional.linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Dict , _UpperCAmelCase: List[Any]=None , _UpperCAmelCase: List[str]=False ):
if labels is not None:
# Shift so that tokens < n predict n
_lowerCAmelCase :Tuple = hidden[..., :-1, :].contiguous()
_lowerCAmelCase :Tuple = labels[..., 1:].contiguous()
_lowerCAmelCase :List[Any] = hidden.view(-1 , hidden.size(-1 ) )
_lowerCAmelCase :Dict = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
else:
_lowerCAmelCase :int = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
_lowerCAmelCase :str = self._compute_logit(_UpperCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
_lowerCAmelCase :Dict = labels != -100
_lowerCAmelCase :Dict = torch.zeros_like(_UpperCAmelCase , dtype=hidden.dtype , device=hidden.device )
_lowerCAmelCase :Any = (
-nn.functional.log_softmax(_UpperCAmelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
_lowerCAmelCase :Dict = nn.functional.log_softmax(_UpperCAmelCase , dim=-1 )
else:
# construct weights and biases
_lowerCAmelCase , _lowerCAmelCase :Optional[Any] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_lowerCAmelCase , _lowerCAmelCase :List[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_lowerCAmelCase :int = self.out_layers[0].weight[l_idx:r_idx]
_lowerCAmelCase :Any = self.out_layers[0].bias[l_idx:r_idx]
else:
_lowerCAmelCase :Optional[Any] = self.out_layers[i].weight
_lowerCAmelCase :Union[str, Any] = self.out_layers[i].bias
if i == 0:
_lowerCAmelCase :Optional[Any] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_lowerCAmelCase :Tuple = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_UpperCAmelCase )
biases.append(_UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :List[str] = weights[0], biases[0], self.out_projs[0]
_lowerCAmelCase :List[Any] = self._compute_logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :int = nn.functional.log_softmax(_UpperCAmelCase , dim=1 )
if labels is None:
_lowerCAmelCase :Union[str, Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
_lowerCAmelCase :Dict = torch.zeros_like(_UpperCAmelCase , dtype=hidden.dtype , device=hidden.device )
_lowerCAmelCase :List[Any] = 0
_lowerCAmelCase :Dict = [0] + self.cutoffs
for i in range(len(_UpperCAmelCase ) - 1 ):
_lowerCAmelCase , _lowerCAmelCase :str = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
_lowerCAmelCase :int = (labels >= l_idx) & (labels < r_idx)
_lowerCAmelCase :int = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
_lowerCAmelCase :int = labels.index_select(0 , _UpperCAmelCase ) - l_idx
_lowerCAmelCase :Dict = head_logprob.index_select(0 , _UpperCAmelCase )
_lowerCAmelCase :Optional[int] = hidden.index_select(0 , _UpperCAmelCase )
else:
_lowerCAmelCase :Tuple = hidden
if i == 0:
if labels is not None:
_lowerCAmelCase :Any = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
_lowerCAmelCase :List[Any] = head_logprob[:, : self.cutoffs[0]]
else:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Optional[Any] = weights[i], biases[i], self.out_projs[i]
_lowerCAmelCase :Union[str, Any] = self._compute_logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = nn.functional.log_softmax(_UpperCAmelCase , dim=1 )
_lowerCAmelCase :Dict = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
_lowerCAmelCase :int = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
_lowerCAmelCase :List[Any] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
_lowerCAmelCase :Dict = logprob_i
if labels is not None:
if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
out.index_copy_(0 , _UpperCAmelCase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: List[str] ):
if self.n_clusters == 0:
_lowerCAmelCase :Tuple = self._compute_logit(_UpperCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(_UpperCAmelCase , dim=-1 )
else:
# construct weights and biases
_lowerCAmelCase , _lowerCAmelCase :List[str] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_lowerCAmelCase , _lowerCAmelCase :Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_lowerCAmelCase :Optional[Any] = self.out_layers[0].weight[l_idx:r_idx]
_lowerCAmelCase :Union[str, Any] = self.out_layers[0].bias[l_idx:r_idx]
else:
_lowerCAmelCase :List[str] = self.out_layers[i].weight
_lowerCAmelCase :List[Any] = self.out_layers[i].bias
if i == 0:
_lowerCAmelCase :int = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_lowerCAmelCase :Tuple = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_UpperCAmelCase )
biases.append(_UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Optional[Any] = weights[0], biases[0], self.out_projs[0]
_lowerCAmelCase :int = self._compute_logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Tuple = hidden.new_empty((head_logit.size(0 ), self.n_token) )
_lowerCAmelCase :Optional[int] = nn.functional.log_softmax(_UpperCAmelCase , dim=1 )
_lowerCAmelCase :int = [0] + self.cutoffs
for i in range(len(_UpperCAmelCase ) - 1 ):
_lowerCAmelCase , _lowerCAmelCase :Dict = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
_lowerCAmelCase :List[Any] = head_logprob[:, : self.cutoffs[0]]
else:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = weights[i], biases[i], self.out_projs[i]
_lowerCAmelCase :List[str] = self._compute_logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Any = nn.functional.log_softmax(_UpperCAmelCase , dim=1 )
_lowerCAmelCase :str = head_logprob[:, -i] + tail_logprob_i
_lowerCAmelCase :List[str] = logprob_i
return out | 687 |
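# --- Hedged sketch (added): PyTorch ships a built-in adaptive softmax that
# uses the same head/tail-cluster idea as the module above; shown here for
# intuition, not as a drop-in replacement.
if __name__ == "__main__":
    adaptive = nn.AdaptiveLogSoftmaxWithLoss(in_features=64, n_classes=1000, cutoffs=[100, 500], div_value=4.0)
    hidden_states = torch.randn(8, 64)
    targets = torch.randint(0, 1000, (8,))
    result = adaptive(hidden_states, targets)  # namedtuple with .output and .loss
    print(result.loss.item())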
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :Optional[int] = list(__magic_name__ )
_lowerCAmelCase :Dict = list(__magic_name__ )
_lowerCAmelCase :Any = 0
for i in range(len(__magic_name__ ) ):
if lista[i] != lista[i]:
count += 1
_lowerCAmelCase :Union[str, Any] = '_'
if count > 1:
return False
else:
return "".join(__magic_name__ )
def UpperCamelCase_( __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :int = []
while True:
_lowerCAmelCase :str = ['$'] * len(__magic_name__ )
_lowerCAmelCase :Optional[int] = []
for i in range(len(__magic_name__ ) ):
for j in range(i + 1 , len(__magic_name__ ) ):
_lowerCAmelCase :int = compare_string(binary[i] , binary[j] )
if k is False:
_lowerCAmelCase :str = '*'
_lowerCAmelCase :Union[str, Any] = '*'
temp.append('X' )
for i in range(len(__magic_name__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(__magic_name__ ) == 0:
return pi
_lowerCAmelCase :Any = list(set(__magic_name__ ) )
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : Sequence[float] ):
"""simple docstring"""
_lowerCAmelCase :str = []
for minterm in minterms:
_lowerCAmelCase :Any = ''
for _ in range(__magic_name__ ):
_lowerCAmelCase :Tuple = str(minterm % 2 ) + string
minterm //= 2
temp.append(__magic_name__ )
return temp
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str , __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = list(__magic_name__ )
_lowerCAmelCase :List[Any] = list(__magic_name__ )
_lowerCAmelCase :Optional[Any] = 0
for i in range(len(__magic_name__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def UpperCamelCase_( __magic_name__ : list[list[int]] , __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :str = []
_lowerCAmelCase :List[str] = [0] * len(__magic_name__ )
for i in range(len(chart[0] ) ):
_lowerCAmelCase :Dict = 0
_lowerCAmelCase :Optional[Any] = -1
for j in range(len(__magic_name__ ) ):
if chart[j][i] == 1:
count += 1
_lowerCAmelCase :List[Any] = j
if count == 1:
_lowerCAmelCase :Dict = 1
for i in range(len(__magic_name__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(__magic_name__ ) ):
_lowerCAmelCase :Dict = 0
temp.append(prime_implicants[i] )
while True:
_lowerCAmelCase :Dict = 0
_lowerCAmelCase :Any = -1
_lowerCAmelCase :Optional[Any] = 0
for i in range(len(__magic_name__ ) ):
_lowerCAmelCase :str = chart[i].count(1 )
if count_n > max_n:
_lowerCAmelCase :Optional[Any] = count_n
_lowerCAmelCase :Dict = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(__magic_name__ ) ):
_lowerCAmelCase :str = 0
def UpperCamelCase_( __magic_name__ : list[str] , __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :str = [[0 for x in range(len(__magic_name__ ) )] for x in range(len(__magic_name__ ) )]
for i in range(len(__magic_name__ ) ):
_lowerCAmelCase :Tuple = prime_implicants[i].count('_' )
for j in range(len(__magic_name__ ) ):
if is_for_table(prime_implicants[i] , binary[j] , __magic_name__ ):
_lowerCAmelCase :str = 1
return chart
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :Tuple = int(input('Enter the no. of variables\n' ) )
_lowerCAmelCase :Tuple = [
float(__magic_name__ )
for x in input(
'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
]
_lowerCAmelCase :List[str] = decimal_to_binary(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Any = check(__magic_name__ )
print('Prime Implicants are:' )
print(__magic_name__ )
_lowerCAmelCase :List[Any] = prime_implicant_chart(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Tuple = selection(__magic_name__ , __magic_name__ )
print('Essential Prime Implicants are:' )
print(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 687 | 1 |
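# --- Hedged worked example (added): tracing the Quine-McCluskey procedure
# above by hand for 3 variables with minterms {1, 5, 7}:
#   binary:            001, 101, 111
#   combine 001 & 101 -> _01 ; combine 101 & 111 -> 1_1
#   prime implicants:  {_01, 1_1}
#   m1 is covered only by _01 and m7 only by 1_1, so both are essential, and
#   together they also cover m5.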
from __future__ import annotations
import math
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__magic_name__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
a = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
if not isinstance(__magic_name__ , __magic_name__ ):
raise ValueError('n must be an integer' )
if n <= 0:
raise ValueError('n must be >= 0' )
_lowerCAmelCase :Optional[int] = []
for num in range(len(__magic_name__ ) ):
_lowerCAmelCase :Tuple = 0
while 2 * i * i <= odd_composites[num]:
_lowerCAmelCase :Union[str, Any] = odd_composites[num] - 2 * i * i
if is_prime(__magic_name__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(__magic_name__ ) == n:
return list_nums
return []
def UpperCamelCase_( ):
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'''{solution() = }''') | 687 |
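# --- Hedged sketch (added): independent check that 5777 violates Goldbach's
# other conjecture (the known answer to this problem); the helper names are
# illustrative.
def _is_prime(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d for d in range(2, math.isqrt(n) + 1))

def _has_goldbach_form(n: int) -> bool:
    # odd composite n == prime + 2 * i * i for some i >= 1?
    return any(_is_prime(n - 2 * i * i) for i in range(1, math.isqrt(n // 2) + 1))

if __name__ == "__main__":
    assert _has_goldbach_form(33)        # 33 = 31 + 2 * 1**2
    assert not _has_goldbach_form(5777)  # smallest counterexample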
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
a = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
a = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account.
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
a = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ (datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[int]=4 , _UpperCAmelCase: Optional[int]=False ):
_lowerCAmelCase :Any = compute_bleu(
reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) :Tuple = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
} | 687 | 1 |
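# --- Hedged sketch (added): the brevity penalty term of BLEU described in
# the docstring above, as a standalone function; `_brevity_penalty` is an
# illustrative name.
import math

def _brevity_penalty(translation_length: int, reference_length: int) -> float:
    if translation_length >= reference_length:
        return 1.0
    return math.exp(1 - reference_length / translation_length)

if __name__ == "__main__":
    print(_brevity_penalty(9, 10))  # ~0.8948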
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def UpperCamelCase_( __magic_name__ : List[str] ):
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
def __init__( self: List[Any] , _UpperCAmelCase: nn.Module , _UpperCAmelCase: int ):
super().__init__()
_lowerCAmelCase :List[str] = module
_lowerCAmelCase :int = nn.Sequential(
nn.Linear(module.in_features , _UpperCAmelCase , bias=_UpperCAmelCase ) , nn.Linear(_UpperCAmelCase , module.out_features , bias=_UpperCAmelCase ) , )
_lowerCAmelCase :str = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=_UpperCAmelCase )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] , _UpperCAmelCase: Optional[Any] , *_UpperCAmelCase: Dict , **_UpperCAmelCase: int ):
return self.module(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) + self.adapter(_UpperCAmelCase )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Tuple = 'bigscience/bloom-1b7'
# Constant values
lowerCamelCase : Union[str, Any] = 2.109_6595_5269_2574
lowerCamelCase : List[Any] = 'Hello my name is'
lowerCamelCase : Any = set()
EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
lowerCamelCase : Any = 10
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
# Models and tokenizer
_lowerCAmelCase :Union[str, Any] = AutoTokenizer.from_pretrained(self.model_name )
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
super().setUp()
# Models and tokenizer
_lowerCAmelCase :Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
_lowerCAmelCase :Optional[int] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Tuple = self.model_abit.config
self.assertTrue(hasattr(_UpperCAmelCase , 'quantization_config' ) )
_lowerCAmelCase :Union[str, Any] = config.to_dict()
_lowerCAmelCase :Union[str, Any] = config.to_diff_dict()
_lowerCAmelCase :List[str] = config.to_json_string()
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
from bitsandbytes.nn import Paramsabit
_lowerCAmelCase :int = self.model_fpaa.get_memory_footprint()
_lowerCAmelCase :List[Any] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
_lowerCAmelCase :List[str] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def SCREAMING_SNAKE_CASE__ ( self: str ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_UpperCAmelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' )
_lowerCAmelCase :int = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :Dict = BitsAndBytesConfig()
_lowerCAmelCase :Any = True
_lowerCAmelCase :Optional[int] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , device_map='auto' )
_lowerCAmelCase :Union[str, Any] = self.tokenizer(self.input_text , return_tensors='pt' )
_lowerCAmelCase :Tuple = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
with self.assertRaises(_UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :List[str] = BitsAndBytesConfig()
with self.assertRaises(_UpperCAmelCase ):
_lowerCAmelCase :Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , load_in_abit=_UpperCAmelCase , device_map='auto' , bnb_abit_quant_type='nf4' , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
with self.assertRaises(_UpperCAmelCase ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
_lowerCAmelCase :List[Any] = self.tokenizer(self.input_text , return_tensors='pt' )
_lowerCAmelCase :int = self.model_fpaa.to(torch.floataa )
_lowerCAmelCase :Union[str, Any] = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
_lowerCAmelCase :Tuple = self.model_fpaa.to('cpu' )
# Check this does not throw an error
_lowerCAmelCase :Union[str, Any] = self.model_fpaa.half()
# Check this does not throw an error
_lowerCAmelCase :List[str] = self.model_fpaa.float()
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Tuple = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_UpperCAmelCase , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls: Union[str, Any] ):
_lowerCAmelCase :List[Any] = 't5-small'
_lowerCAmelCase :Union[str, Any] = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
_lowerCAmelCase :str = AutoTokenizer.from_pretrained(cls.model_name )
_lowerCAmelCase :Dict = 'Translate in German: Hello, my dog is cute'
def SCREAMING_SNAKE_CASE__ ( self: int ):
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
from transformers import TaForConditionalGeneration
_lowerCAmelCase :Optional[int] = TaForConditionalGeneration._keep_in_fpaa_modules
_lowerCAmelCase :Tuple = None
# test with `t5-small`
_lowerCAmelCase :Dict = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
_lowerCAmelCase :str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
_lowerCAmelCase :Optional[Any] = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
_lowerCAmelCase :int = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
_lowerCAmelCase :Optional[int] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
_lowerCAmelCase :Union[str, Any] = model.generate(**_UpperCAmelCase )
_lowerCAmelCase :str = modules
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
_lowerCAmelCase :Dict = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
_lowerCAmelCase :Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
_lowerCAmelCase :List[str] = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
_lowerCAmelCase :str = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
_lowerCAmelCase :Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
_lowerCAmelCase :str = model.generate(**_UpperCAmelCase )
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Any ):
super().setUp()
# model_name
_lowerCAmelCase :Union[str, Any] = 'bigscience/bloom-560m'
_lowerCAmelCase :List[Any] = 't5-small'
# Different types of model
_lowerCAmelCase :Union[str, Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Sequence classification model
_lowerCAmelCase :Optional[int] = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# CausalLM model
_lowerCAmelCase :Optional[int] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Seq2seq model
_lowerCAmelCase :Dict = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self: int ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
super().setUp()
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Union[str, Any] = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
_lowerCAmelCase :List[str] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
super().setUp()
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Dict = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
_lowerCAmelCase :Union[str, Any] = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
_lowerCAmelCase :str = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Tuple = 'facebook/opt-350m'
super().setUp()
def SCREAMING_SNAKE_CASE__ ( self: str ):
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
_lowerCAmelCase :Optional[int] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
_lowerCAmelCase :Dict = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
_lowerCAmelCase :List[Any] = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_UpperCAmelCase ) ):
_lowerCAmelCase :Optional[Any] = LoRALayer(module.q_proj , rank=16 )
_lowerCAmelCase :Optional[int] = LoRALayer(module.k_proj , rank=16 )
_lowerCAmelCase :List[Any] = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
_lowerCAmelCase :int = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
_lowerCAmelCase :Dict = model.forward(**_UpperCAmelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_UpperCAmelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Dict = 'gpt2-xl'
lowerCamelCase : Optional[int] = 3.3191_8548_5415_2187 | 687 |
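# Back-of-envelope sketch (an assumption, not asserted by the tests above):
# fp16 stores 2 bytes per weight while 4-bit storage costs roughly half a
# byte, but un-quantized modules and quantization state keep the measured
# fp16/4-bit footprint ratios below the ideal 4x.
for _expected_ratio in (2.109_6595_5269_2574, 3.3191_8548_5415_2187):
    assert 1.0 < _expected_ratio < 4.0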
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 | 1 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"""huggingface/time-series-transformer-tourism-monthly""": (
"""https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"""
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : List[str] = 'time_series_transformer'
lowerCamelCase : Optional[Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self: Optional[Any] , _UpperCAmelCase: Optional[int] = None , _UpperCAmelCase: Optional[int] = None , _UpperCAmelCase: str = "student_t" , _UpperCAmelCase: str = "nll" , _UpperCAmelCase: int = 1 , _UpperCAmelCase: List[int] = [1, 2, 3, 4, 5, 6, 7] , _UpperCAmelCase: Optional[Union[str, bool]] = "mean" , _UpperCAmelCase: int = 0 , _UpperCAmelCase: int = 0 , _UpperCAmelCase: int = 0 , _UpperCAmelCase: int = 0 , _UpperCAmelCase: Optional[List[int]] = None , _UpperCAmelCase: Optional[List[int]] = None , _UpperCAmelCase: int = 32 , _UpperCAmelCase: int = 32 , _UpperCAmelCase: int = 2 , _UpperCAmelCase: int = 2 , _UpperCAmelCase: int = 2 , _UpperCAmelCase: int = 2 , _UpperCAmelCase: bool = True , _UpperCAmelCase: str = "gelu" , _UpperCAmelCase: int = 64 , _UpperCAmelCase: float = 0.1 , _UpperCAmelCase: float = 0.1 , _UpperCAmelCase: float = 0.1 , _UpperCAmelCase: float = 0.1 , _UpperCAmelCase: float = 0.1 , _UpperCAmelCase: int = 100 , _UpperCAmelCase: float = 0.0_2 , _UpperCAmelCase: str=True , **_UpperCAmelCase: Union[str, Any] , ):
# time series specific configuration
_lowerCAmelCase :int = prediction_length
_lowerCAmelCase :str = context_length or prediction_length
_lowerCAmelCase :Optional[int] = distribution_output
_lowerCAmelCase :List[Any] = loss
_lowerCAmelCase :Optional[Any] = input_size
_lowerCAmelCase :Optional[Any] = num_time_features
_lowerCAmelCase :List[Any] = lags_sequence
_lowerCAmelCase :Union[str, Any] = scaling
_lowerCAmelCase :Optional[Any] = num_dynamic_real_features
_lowerCAmelCase :Tuple = num_static_real_features
_lowerCAmelCase :Tuple = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(_UpperCAmelCase ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
_lowerCAmelCase :Optional[Any] = cardinality
else:
_lowerCAmelCase :Optional[int] = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(_UpperCAmelCase ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
_lowerCAmelCase :Dict = embedding_dimension
else:
_lowerCAmelCase :Optional[Any] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
_lowerCAmelCase :str = num_parallel_samples
# Transformer architecture configuration
_lowerCAmelCase :List[str] = input_size * len(_UpperCAmelCase ) + self._number_of_features
_lowerCAmelCase :Union[str, Any] = d_model
_lowerCAmelCase :List[str] = encoder_attention_heads
_lowerCAmelCase :Any = decoder_attention_heads
_lowerCAmelCase :Optional[Any] = encoder_ffn_dim
_lowerCAmelCase :Optional[int] = decoder_ffn_dim
_lowerCAmelCase :Union[str, Any] = encoder_layers
_lowerCAmelCase :str = decoder_layers
_lowerCAmelCase :List[Any] = dropout
_lowerCAmelCase :List[str] = attention_dropout
_lowerCAmelCase :str = activation_dropout
_lowerCAmelCase :List[Any] = encoder_layerdrop
_lowerCAmelCase :Any = decoder_layerdrop
_lowerCAmelCase :List[str] = activation_function
_lowerCAmelCase :int = init_std
_lowerCAmelCase :List[Any] = use_cache
super().__init__(is_encoder_decoder=_UpperCAmelCase , **_UpperCAmelCase )
@property
def SCREAMING_SNAKE_CASE__ ( self: int ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
) | 687 |
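# Sketch of the feature-size bookkeeping above, with hypothetical numbers:
# the encoder consumes input_size * len(lags_sequence) lagged values per time
# step plus the extra scalars counted by _number_of_features, e.g. a
# univariate series with 7 lags and 2 extra features gives 9-wide vectors.
_input_size, _lags, _extra_features = 1, [1, 2, 3, 4, 5, 6, 7], 2
assert _input_size * len(_lags) + _extra_features == 9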
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self: str , _UpperCAmelCase: str , _UpperCAmelCase: Optional[int]=7 , _UpperCAmelCase: Union[str, Any]=3 , _UpperCAmelCase: int=18 , _UpperCAmelCase: List[Any]=30 , _UpperCAmelCase: List[Any]=400 , _UpperCAmelCase: Optional[Any]=True , _UpperCAmelCase: Any=None , _UpperCAmelCase: Any=True , _UpperCAmelCase: int=None , _UpperCAmelCase: Union[str, Any]=True , ):
_lowerCAmelCase :Tuple = size if size is not None else {'shortest_edge': 20}
_lowerCAmelCase :str = crop_size if crop_size is not None else {'height': 18, 'width': 18}
_lowerCAmelCase :str = parent
_lowerCAmelCase :List[Any] = batch_size
_lowerCAmelCase :Optional[Any] = num_channels
_lowerCAmelCase :Optional[Any] = image_size
_lowerCAmelCase :int = min_resolution
_lowerCAmelCase :List[str] = max_resolution
_lowerCAmelCase :List[str] = do_resize
_lowerCAmelCase :Optional[int] = size
_lowerCAmelCase :str = do_center_crop
_lowerCAmelCase :int = crop_size
_lowerCAmelCase :Optional[int] = do_flip_channel_order
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Any = MobileViTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = MobileViTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self: str ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'center_crop' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_flip_channel_order' ) )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
_lowerCAmelCase :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self: int ):
# Initialize image_processing
_lowerCAmelCase :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
_lowerCAmelCase :Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :str = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
# Initialize image_processing
_lowerCAmelCase :int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
_lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :List[str] = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
# Initialize image_processing
_lowerCAmelCase :Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
_lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :int = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , ) | 687 | 1 |
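# Standalone sketch (an assumption about the flag's meaning) of what
# do_flip_channel_order does to a channel-first array: the RGB planes are
# reversed into BGR order.
_rgb = np.zeros((3, 2, 2))
_rgb[0] = 1.0                # mark the red plane
_bgr = _rgb[::-1, :, :]      # flip the channel order
assert _bgr[2].sum() == 4.0  # the red plane is now last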
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = 'wavlm'
def __init__( self: Union[str, Any] , _UpperCAmelCase: Optional[int]=32 , _UpperCAmelCase: Optional[int]=768 , _UpperCAmelCase: Union[str, Any]=12 , _UpperCAmelCase: Tuple=12 , _UpperCAmelCase: Union[str, Any]=3072 , _UpperCAmelCase: Union[str, Any]="gelu" , _UpperCAmelCase: List[Any]=0.1 , _UpperCAmelCase: Dict=0.1 , _UpperCAmelCase: int=0.1 , _UpperCAmelCase: str=0.0 , _UpperCAmelCase: Union[str, Any]=0.1 , _UpperCAmelCase: Dict=0.1 , _UpperCAmelCase: List[Any]=0.0_2 , _UpperCAmelCase: Optional[int]=1e-5 , _UpperCAmelCase: List[str]="group" , _UpperCAmelCase: List[str]="gelu" , _UpperCAmelCase: Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , _UpperCAmelCase: Dict=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase: Dict=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase: List[Any]=False , _UpperCAmelCase: List[Any]=128 , _UpperCAmelCase: Optional[int]=16 , _UpperCAmelCase: Dict=320 , _UpperCAmelCase: int=800 , _UpperCAmelCase: List[Any]=False , _UpperCAmelCase: Tuple=True , _UpperCAmelCase: int=0.0_5 , _UpperCAmelCase: List[str]=10 , _UpperCAmelCase: Optional[int]=2 , _UpperCAmelCase: Dict=0.0 , _UpperCAmelCase: List[str]=10 , _UpperCAmelCase: Dict=320 , _UpperCAmelCase: Any=2 , _UpperCAmelCase: List[str]=0.1 , _UpperCAmelCase: Any=100 , _UpperCAmelCase: Tuple=256 , _UpperCAmelCase: Tuple=256 , _UpperCAmelCase: Union[str, Any]=0.1 , _UpperCAmelCase: Optional[Any]="mean" , _UpperCAmelCase: Dict=False , _UpperCAmelCase: int=False , _UpperCAmelCase: List[str]=256 , _UpperCAmelCase: int=(512, 512, 512, 512, 1500) , _UpperCAmelCase: List[Any]=(5, 3, 3, 1, 1) , _UpperCAmelCase: int=(1, 2, 3, 1, 1) , _UpperCAmelCase: Union[str, Any]=512 , _UpperCAmelCase: int=80 , _UpperCAmelCase: Optional[int]=0 , _UpperCAmelCase: Union[str, Any]=1 , _UpperCAmelCase: Union[str, Any]=2 , _UpperCAmelCase: List[Any]=False , _UpperCAmelCase: List[Any]=3 , _UpperCAmelCase: Optional[int]=2 , _UpperCAmelCase: str=3 , _UpperCAmelCase: int=None , **_UpperCAmelCase: List[str] , ):
super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase )
_lowerCAmelCase :int = hidden_size
_lowerCAmelCase :List[str] = feat_extract_norm
_lowerCAmelCase :Optional[int] = feat_extract_activation
_lowerCAmelCase :Union[str, Any] = list(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = list(_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = list(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = conv_bias
_lowerCAmelCase :List[Any] = num_buckets
_lowerCAmelCase :Tuple = max_bucket_distance
_lowerCAmelCase :int = num_conv_pos_embeddings
_lowerCAmelCase :Optional[Any] = num_conv_pos_embedding_groups
_lowerCAmelCase :Tuple = len(self.conv_dim )
_lowerCAmelCase :str = num_hidden_layers
_lowerCAmelCase :Union[str, Any] = intermediate_size
_lowerCAmelCase :int = hidden_act
_lowerCAmelCase :List[str] = num_attention_heads
_lowerCAmelCase :int = hidden_dropout
_lowerCAmelCase :Optional[Any] = attention_dropout
_lowerCAmelCase :str = activation_dropout
_lowerCAmelCase :Optional[int] = feat_proj_dropout
_lowerCAmelCase :Dict = final_dropout
_lowerCAmelCase :Union[str, Any] = layerdrop
_lowerCAmelCase :Dict = layer_norm_eps
_lowerCAmelCase :Optional[Any] = initializer_range
_lowerCAmelCase :List[str] = num_ctc_classes
_lowerCAmelCase :str = vocab_size
_lowerCAmelCase :List[Any] = do_stable_layer_norm
_lowerCAmelCase :List[str] = use_weighted_layer_sum
_lowerCAmelCase :List[str] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase :str = apply_spec_augment
_lowerCAmelCase :Dict = mask_time_prob
_lowerCAmelCase :Tuple = mask_time_length
_lowerCAmelCase :Union[str, Any] = mask_time_min_masks
_lowerCAmelCase :Dict = mask_feature_prob
_lowerCAmelCase :Optional[int] = mask_feature_length
# parameters for pretraining with codevector quantized representations
_lowerCAmelCase :Dict = num_codevectors_per_group
_lowerCAmelCase :Union[str, Any] = num_codevector_groups
_lowerCAmelCase :List[str] = contrastive_logits_temperature
_lowerCAmelCase :Union[str, Any] = num_negatives
_lowerCAmelCase :Dict = codevector_dim
_lowerCAmelCase :Any = proj_codevector_dim
_lowerCAmelCase :Optional[int] = diversity_loss_weight
# ctc loss
_lowerCAmelCase :Dict = ctc_loss_reduction
_lowerCAmelCase :Any = ctc_zero_infinity
# adapter
_lowerCAmelCase :Optional[int] = add_adapter
_lowerCAmelCase :Union[str, Any] = adapter_kernel_size
_lowerCAmelCase :Tuple = adapter_stride
_lowerCAmelCase :Any = num_adapter_layers
_lowerCAmelCase :List[str] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCAmelCase :str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCAmelCase :Any = list(_UpperCAmelCase )
_lowerCAmelCase :Dict = list(_UpperCAmelCase )
_lowerCAmelCase :Dict = list(_UpperCAmelCase )
_lowerCAmelCase :str = xvector_output_dim
@property
def SCREAMING_SNAKE_CASE__ ( self: Any ):
return functools.reduce(operator.mul , self.conv_stride , 1 ) | 687 |
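# Sanity sketch of the property above: the feature extractor's overall input
# stride is the product of the per-layer conv strides, so the default
# (5, 2, 2, 2, 2, 2, 2) consumes 320 input samples per output frame.
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320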
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ (datasets.BuilderConfig ):
"""simple docstring"""
lowerCamelCase : Optional[datasets.Features] = None
class UpperCAmelCase_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCamelCase : Any = PandasConfig
def SCREAMING_SNAKE_CASE__ ( self: int ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: List[str] ):
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_lowerCAmelCase :Dict = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_UpperCAmelCase , (str, list, tuple) ):
_lowerCAmelCase :Any = data_files
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase :List[Any] = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
_lowerCAmelCase :Any = []
for split_name, files in data_files.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :str = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase :Union[str, Any] = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_UpperCAmelCase , gen_kwargs={'files': files} ) )
return splits
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: pa.Table ):
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase :str = table_cast(_UpperCAmelCase , self.config.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: Dict ):
for i, file in enumerate(itertools.chain.from_iterable(_UpperCAmelCase ) ):
with open(_UpperCAmelCase , 'rb' ) as f:
_lowerCAmelCase :Optional[Any] = pa.Table.from_pandas(pd.read_pickle(_UpperCAmelCase ) )
yield i, self._cast_table(_UpperCAmelCase ) | 687 | 1 |
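# Standalone sketch of the core conversion in the generator above: a pandas
# DataFrame becomes a pyarrow Table (built in memory here instead of read
# back from a pickled file).
_frame = pd.DataFrame({"col": [1, 2, 3]})
_table = pa.Table.from_pandas(_frame)
assert _table.num_rows == 3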
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
a = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
a = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : List[str] , __magic_name__ : int ):
"""simple docstring"""
for attribute in key.split('.' ):
_lowerCAmelCase :Optional[Any] = getattr(__magic_name__ , __magic_name__ )
if weight_type is not None:
_lowerCAmelCase :List[str] = getattr(__magic_name__ , __magic_name__ ).shape
else:
_lowerCAmelCase :List[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_lowerCAmelCase :Union[str, Any] = value
elif weight_type == "weight_g":
_lowerCAmelCase :Union[str, Any] = value
elif weight_type == "weight_v":
_lowerCAmelCase :str = value
elif weight_type == "bias":
_lowerCAmelCase :str = value
else:
_lowerCAmelCase :Optional[Any] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : List[Any] ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = []
_lowerCAmelCase :Tuple = fairseq_model.state_dict()
_lowerCAmelCase :Tuple = hf_model.feature_extractor
for name, value in fairseq_dict.items():
_lowerCAmelCase :Tuple = False
if "conv_layers" in name:
load_conv_layer(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == 'group' , )
_lowerCAmelCase :List[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
_lowerCAmelCase :Union[str, Any] = True
if "*" in mapped_key:
_lowerCAmelCase :List[str] = name.split(__magic_name__ )[0].split('.' )[-2]
_lowerCAmelCase :Any = mapped_key.replace('*' , __magic_name__ )
if "weight_g" in name:
_lowerCAmelCase :int = 'weight_g'
elif "weight_v" in name:
_lowerCAmelCase :Any = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
_lowerCAmelCase :str = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCAmelCase :List[str] = 'weight'
else:
_lowerCAmelCase :str = None
set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
continue
if not is_used:
unused_weights.append(__magic_name__ )
logger.warning(f"""Unused weights: {unused_weights}""" )
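# Sketch of the "*" placeholder expansion used with MAPPING above: the layer
# index parsed out of a fairseq key is substituted into the HF key template.
_mapped_key = "encoder.layers.*.attention.k_proj"
assert _mapped_key.replace("*", "3") == "encoder.layers.3.attention.k_proj"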
def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Tuple ):
"""simple docstring"""
_lowerCAmelCase :Tuple = full_name.split('conv_layers.' )[-1]
_lowerCAmelCase :List[Any] = name.split('.' )
_lowerCAmelCase :Union[str, Any] = int(items[0] )
_lowerCAmelCase :str = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_lowerCAmelCase :List[Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_lowerCAmelCase :Any = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_lowerCAmelCase :Dict = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_lowerCAmelCase :int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__magic_name__ )
@torch.no_grad()
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int]=None ):
"""simple docstring"""
_lowerCAmelCase :List[Any] = torch.load(__magic_name__ )
_lowerCAmelCase :str = WavLMConfigOrig(checkpoint['cfg'] )
_lowerCAmelCase :List[str] = WavLMOrig(__magic_name__ )
model.load_state_dict(checkpoint['model'] )
model.eval()
if config_path is not None:
_lowerCAmelCase :Union[str, Any] = WavLMConfig.from_pretrained(__magic_name__ )
else:
_lowerCAmelCase :Tuple = WavLMConfig()
_lowerCAmelCase :Optional[int] = WavLMModel(__magic_name__ )
recursively_load_weights(__magic_name__ , __magic_name__ )
hf_wavlm.save_pretrained(__magic_name__ )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
a = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 687 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
LABEL_DIR = """"""
IMG_DIR = """"""
OUTPUT_DIR = """"""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main():
"""simple docstring"""
    img_paths , annos = get_dataset(LABEL_DIR , IMG_DIR )
    print('Processing...' )
    new_images , new_annos , paths = update_image_and_anno(img_paths , annos , FLIP_TYPE )
    for index, image in enumerate(new_images ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
        file_root = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
        cva.imwrite(f"""/{file_root}.jpg""" , image , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(f"""Success {index+1}/{len(new_images )} with {file_name}""" )
        annos_list = []
        for anno in new_annos[index]:
            obj = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
            annos_list.append(obj )
        with open(f"""/{file_root}.txt""" , 'w' ) as outfile:
            outfile.write('\n'.join(line for line in annos_list ) )
def get_dataset( label_dir: str , img_dir: str ):
"""simple docstring"""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , '*.txt' ) ):
        label_name = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , f"""{label_name}.jpg""" )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n' ).split(' ' )
            boxes.append(
                [
                    int(obj[0] ),
                    float(obj[1] ),
                    float(obj[2] ),
                    float(obj[3] ),
                    float(obj[4] ),
                ] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
return img_paths, labels
def update_image_and_anno( img_list: list , anno_list: list , flip_type: int = 1 ):
    """simple docstring"""
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list ) ):
        new_annos = []
        path = img_list[idx]
        path_list.append(path )
        img_annos = anno_list[idx]
        img = cva.imread(path )
        if flip_type == 1:
            new_img = cva.flip(img , flip_type )
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            new_img = cva.flip(img , flip_type )
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(new_annos )
        new_imgs_list.append(new_img )
return new_imgs_list, new_annos_lists, path_list
def random_chars( number_char: int = 32 ):
    """simple docstring"""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("""DONE ✅""") | 687 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : torch.FloatTensor
class UpperCAmelCase_ (snake_case__ , snake_case__ ):
"""simple docstring"""
@register_to_config
def __init__( self: int , _UpperCAmelCase: int = 6_5536 , _UpperCAmelCase: Optional[int] = None , _UpperCAmelCase: int = 2 , _UpperCAmelCase: int = 2 , _UpperCAmelCase: int = 0 , _UpperCAmelCase: str = "fourier" , _UpperCAmelCase: bool = True , _UpperCAmelCase: bool = False , _UpperCAmelCase: float = 0.0 , _UpperCAmelCase: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _UpperCAmelCase: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _UpperCAmelCase: Tuple[str] = "UNetMidBlock1D" , _UpperCAmelCase: str = None , _UpperCAmelCase: Tuple[int] = (32, 32, 64) , _UpperCAmelCase: str = None , _UpperCAmelCase: int = 8 , _UpperCAmelCase: int = 1 , _UpperCAmelCase: bool = False , ):
super().__init__()
_lowerCAmelCase :List[Any] = sample_size
# time
if time_embedding_type == "fourier":
_lowerCAmelCase :List[str] = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=_UpperCAmelCase , log=_UpperCAmelCase , flip_sin_to_cos=_UpperCAmelCase )
_lowerCAmelCase :Dict = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_lowerCAmelCase :Dict = Timesteps(
block_out_channels[0] , flip_sin_to_cos=_UpperCAmelCase , downscale_freq_shift=_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = block_out_channels[0]
if use_timestep_embedding:
_lowerCAmelCase :List[Any] = block_out_channels[0] * 4
_lowerCAmelCase :Tuple = TimestepEmbedding(
in_channels=_UpperCAmelCase , time_embed_dim=_UpperCAmelCase , act_fn=_UpperCAmelCase , out_dim=block_out_channels[0] , )
_lowerCAmelCase :List[Any] = nn.ModuleList([] )
_lowerCAmelCase :Optional[int] = None
_lowerCAmelCase :Dict = nn.ModuleList([] )
_lowerCAmelCase :List[Any] = None
# down
_lowerCAmelCase :List[Any] = in_channels
for i, down_block_type in enumerate(_UpperCAmelCase ):
_lowerCAmelCase :Dict = output_channel
_lowerCAmelCase :Tuple = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_lowerCAmelCase :Optional[Any] = i == len(_UpperCAmelCase ) - 1
_lowerCAmelCase :List[Any] = get_down_block(
_UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(_UpperCAmelCase )
# mid
_lowerCAmelCase :List[str] = get_mid_block(
_UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_UpperCAmelCase , add_downsample=_UpperCAmelCase , )
# up
_lowerCAmelCase :List[str] = list(reversed(_UpperCAmelCase ) )
_lowerCAmelCase :Union[str, Any] = reversed_block_out_channels[0]
if out_block_type is None:
_lowerCAmelCase :Optional[Any] = out_channels
else:
_lowerCAmelCase :Union[str, Any] = block_out_channels[0]
for i, up_block_type in enumerate(_UpperCAmelCase ):
_lowerCAmelCase :Dict = output_channel
_lowerCAmelCase :str = (
reversed_block_out_channels[i + 1] if i < len(_UpperCAmelCase ) - 1 else final_upsample_channels
)
_lowerCAmelCase :List[str] = i == len(_UpperCAmelCase ) - 1
_lowerCAmelCase :Dict = get_up_block(
_UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = output_channel
# out
_lowerCAmelCase :Any = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
_lowerCAmelCase :Optional[int] = get_out_block(
out_block_type=_UpperCAmelCase , num_groups_out=_UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=_UpperCAmelCase , act_fn=_UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: torch.FloatTensor , _UpperCAmelCase: Union[torch.Tensor, float, int] , _UpperCAmelCase: bool = True , ):
_lowerCAmelCase :List[Any] = timestep
if not torch.is_tensor(_UpperCAmelCase ):
_lowerCAmelCase :int = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(_UpperCAmelCase ) and len(timesteps.shape ) == 0:
_lowerCAmelCase :Tuple = timesteps[None].to(sample.device )
_lowerCAmelCase :str = self.time_proj(_UpperCAmelCase )
if self.config.use_timestep_embedding:
_lowerCAmelCase :List[str] = self.time_mlp(_UpperCAmelCase )
else:
_lowerCAmelCase :str = timestep_embed[..., None]
_lowerCAmelCase :Any = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
_lowerCAmelCase :Optional[Any] = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
_lowerCAmelCase :Optional[int] = ()
for downsample_block in self.down_blocks:
_lowerCAmelCase , _lowerCAmelCase :Optional[Any] = downsample_block(hidden_states=_UpperCAmelCase , temb=_UpperCAmelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_lowerCAmelCase :Optional[int] = self.mid_block(_UpperCAmelCase , _UpperCAmelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
_lowerCAmelCase :Any = down_block_res_samples[-1:]
_lowerCAmelCase :Any = down_block_res_samples[:-1]
_lowerCAmelCase :str = upsample_block(_UpperCAmelCase , res_hidden_states_tuple=_UpperCAmelCase , temb=_UpperCAmelCase )
# 5. post-process
if self.out_block:
_lowerCAmelCase :Any = self.out_block(_UpperCAmelCase , _UpperCAmelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=_UpperCAmelCase ) | 687 |
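# Sketch of the timestep normalization at the top of the forward pass above:
# a plain Python int is promoted to a 1-element long tensor on the sample's
# device before it reaches the time projection.
_sample = torch.zeros(1, 2, 8)
_timestep = 10
if not torch.is_tensor(_timestep):
    _timestep = torch.tensor([_timestep], dtype=torch.long, device=_sample.device)
assert _timestep.shape == (1,)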
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
a = logging.get_logger(__name__)
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = nn.functional.normalize(__magic_name__ )
_lowerCAmelCase :List[str] = nn.functional.normalize(__magic_name__ )
return torch.mm(__magic_name__ , normalized_text_embeds.t() )
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : str = CLIPConfig
lowerCamelCase : Any = ['CLIPEncoderLayer']
def __init__( self: Optional[int] , _UpperCAmelCase: CLIPConfig ):
super().__init__(_UpperCAmelCase )
_lowerCAmelCase :Any = CLIPVisionModel(config.vision_config )
_lowerCAmelCase :Optional[int] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_UpperCAmelCase )
_lowerCAmelCase :int = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :Any = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :str = nn.Parameter(torch.ones(17 ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = nn.Parameter(torch.ones(3 ) , requires_grad=_UpperCAmelCase )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Dict ):
_lowerCAmelCase :str = self.vision_model(_UpperCAmelCase )[1] # pooled_output
_lowerCAmelCase :Union[str, Any] = self.visual_projection(_UpperCAmelCase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowerCAmelCase :Optional[int] = cosine_distance(_UpperCAmelCase , self.special_care_embeds ).cpu().float().numpy()
_lowerCAmelCase :List[str] = cosine_distance(_UpperCAmelCase , self.concept_embeds ).cpu().float().numpy()
_lowerCAmelCase :str = []
_lowerCAmelCase :List[Any] = image_embeds.shape[0]
for i in range(_UpperCAmelCase ):
_lowerCAmelCase :Optional[Any] = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
_lowerCAmelCase :List[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
_lowerCAmelCase :List[Any] = special_cos_dist[i][concept_idx]
_lowerCAmelCase :Dict = self.special_care_embeds_weights[concept_idx].item()
_lowerCAmelCase :List[Any] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]} )
_lowerCAmelCase :Any = 0.0_1
for concept_idx in range(len(cos_dist[0] ) ):
_lowerCAmelCase :Union[str, Any] = cos_dist[i][concept_idx]
_lowerCAmelCase :str = self.concept_embeds_weights[concept_idx].item()
_lowerCAmelCase :str = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(_UpperCAmelCase )
result.append(_UpperCAmelCase )
_lowerCAmelCase :Any = [len(res['bad_concepts'] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self: str , _UpperCAmelCase: torch.FloatTensor , _UpperCAmelCase: torch.FloatTensor ):
_lowerCAmelCase :Optional[int] = self.vision_model(_UpperCAmelCase )[1] # pooled_output
_lowerCAmelCase :Union[str, Any] = self.visual_projection(_UpperCAmelCase )
_lowerCAmelCase :Dict = cosine_distance(_UpperCAmelCase , self.special_care_embeds )
_lowerCAmelCase :List[str] = cosine_distance(_UpperCAmelCase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
_lowerCAmelCase :Any = 0.0
_lowerCAmelCase :Union[str, Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
_lowerCAmelCase :Tuple = torch.any(special_scores > 0 , dim=1 )
_lowerCAmelCase :List[str] = special_care * 0.0_1
_lowerCAmelCase :Any = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
_lowerCAmelCase :Optional[Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
_lowerCAmelCase :List[str] = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts | 687 | 1 |
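# Standalone check of the cosine_distance helper above: normalized copies of
# the same vector have cosine similarity exactly 1.
_vec = torch.tensor([[3.0, 4.0]])
_sim = torch.mm(nn.functional.normalize(_vec), nn.functional.normalize(_vec).t())
assert torch.allclose(_sim, torch.ones(1, 1))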
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Any = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
_lowerCAmelCase :Optional[Any] = Vector()
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :int = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(_UpperCAmelCase ) , '(0,0,0,0,0,1)' )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Tuple = Vector([1, 2, 3, 4] )
self.assertEqual(len(_UpperCAmelCase ) , 4 )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :str = Vector([1, 2] )
_lowerCAmelCase :Optional[Any] = Vector([1, 2, 3, 4, 5] )
_lowerCAmelCase :int = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
_lowerCAmelCase :Union[str, Any] = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Any = Vector([1, 2, 3] )
_lowerCAmelCase :Optional[int] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :str = Vector([1, 2, 3] )
_lowerCAmelCase :List[str] = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :Tuple = Vector([1, 2, 3] )
_lowerCAmelCase :Tuple = Vector([2, -1, 4] ) # for test of dot product
_lowerCAmelCase :Dict = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' )
self.assertEqual((a * b) , 0 )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
self.assertEqual(str(zero_vector(10 ) ).count('0' ) , 10 )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :Tuple = Vector([1, 2, 3] )
_lowerCAmelCase :int = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , _UpperCAmelCase , _UpperCAmelCase ) ) , '(3,4,7)' )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :str = Vector([1, 0, 0, 0, 0, 0] )
_lowerCAmelCase :Any = x.copy()
self.assertEqual(str(_UpperCAmelCase ) , str(_UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Optional[int] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(_UpperCAmelCase ) , '(0,1,0)' )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_lowerCAmelCase :Tuple = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(_UpperCAmelCase , _UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_lowerCAmelCase :Any = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(_UpperCAmelCase , _UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :int = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
_lowerCAmelCase :Optional[int] = Vector([1, 2, 3] )
self.assertEqual('(14,32,50)' , str(a * x ) )
self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.0_1 )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_lowerCAmelCase :List[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) )
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_lowerCAmelCase :List[str] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
self.assertEqual(
'|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main() | 687 |
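# For orientation, a short sketch of the Vector/Matrix API the tests above
# exercise. The standalone `from lib import ...` path is an assumption; the
# test file itself uses a relative `.lib` import.
from lib import Matrix, Vector, axpy

v = Vector([1, 2, 3])
w = Vector([1, 0, 1])
print((v + w).component(0))  # 2
print(str(axpy(2, v, w)))    # "(3,4,7)", i.e. 2*v + w
m = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
print(m.determinant())       # -5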
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_3_7_8_1_3_7.0
AXIS_B = 6_3_5_6_7_5_2.3_1_4_2_4_5
EQUATORIAL_RADIUS = 6_378_137
def UpperCamelCase_( lat_1 : float , lon_1 : float , lat_2 : float , lon_2 : float ):
    """simple docstring"""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat_1 = atan((1 - flattening) * tan(radians(lat_1)))
    b_lat_2 = atan((1 - flattening) * tan(radians(lat_2)))
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat_1, lon_1, lat_2, lon_2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat_1 + b_lat_2) / 2
    q_value = (b_lat_2 - b_lat_1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod() | 687 | 1 |
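# Example check under the reconstruction above (coordinates are illustrative):
# San Francisco -> Yosemite should come out around 254 km.
SAN_FRANCISCO = (37.774856, -122.424227)
YOSEMITE = (37.864742, -119.537521)
print(f"{UpperCamelCase_(*SAN_FRANCISCO, *YOSEMITE):0,.0f} meters")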
import numpy as np
from PIL import Image
def maxpooling( arr : np.ndarray , size : int , stride : int ):
    """simple docstring"""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix' )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape) )
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size] )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling( arr : np.ndarray , size : int , stride : int ):
    """simple docstring"""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix' )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape) )
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size] ) )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="""avgpooling""", verbose=True)
# Loading the image
    image = Image.open("""path_to_image""")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show() | 687 |
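# Worked example of the output-size formula used above,
# out = (n - size) // stride + 1: a 4x4 input with size=2, stride=2 -> 2x2.
import numpy as np

demo = np.arange(16).reshape(4, 4)
print(maxpooling(demo, size=2, stride=2))  # [[ 5.  7.] [13. 15.]]
print(avgpooling(demo, size=2, stride=2))  # [[ 2.  4.] [10. 12.]]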
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Dict = 'encoder-decoder'
lowerCamelCase : Optional[Any] = True
def __init__( self: str , **_UpperCAmelCase: int ):
super().__init__(**_UpperCAmelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
_lowerCAmelCase :Optional[Any] = kwargs.pop('encoder' )
_lowerCAmelCase :Dict = encoder_config.pop('model_type' )
_lowerCAmelCase :str = kwargs.pop('decoder' )
_lowerCAmelCase :str = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
_lowerCAmelCase :str = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Tuple = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Any = True
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls: Tuple , _UpperCAmelCase: PretrainedConfig , _UpperCAmelCase: PretrainedConfig , **_UpperCAmelCase: str ):
logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
_lowerCAmelCase :Dict = True
_lowerCAmelCase :List[str] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowerCAmelCase :Optional[int] = self.encoder.to_dict()
_lowerCAmelCase :Union[str, Any] = self.decoder.to_dict()
_lowerCAmelCase :List[str] = self.__class__.model_type
return output | 687 | 1 |
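# Usage sketch: the class above mirrors transformers' EncoderDecoderConfig.
# Composing a config from two sub-configs with the real library
# (checkpoint names are illustrative):
from transformers import AutoConfig, EncoderDecoderConfig

encoder_cfg = AutoConfig.from_pretrained("bert-base-uncased")
decoder_cfg = AutoConfig.from_pretrained("bert-base-uncased")
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)
print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True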
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_neo"""] = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_gpt_neo"""] = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 |
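# The pattern above defers heavy imports until attribute access. A toy
# stand-in for _LazyModule (not the real HF implementation) to illustrate:
import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute -> submodule that actually defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # the real import happens only here, on first attribute access
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)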
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: int , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=13 , _UpperCAmelCase: Optional[Any]=32 , _UpperCAmelCase: List[Any]=2 , _UpperCAmelCase: Optional[int]=3 , _UpperCAmelCase: Optional[int]=16 , _UpperCAmelCase: Optional[Any]=[32, 64, 128] , _UpperCAmelCase: Optional[int]=[1, 2, 1] , _UpperCAmelCase: int=[2, 2, 4] , _UpperCAmelCase: List[str]=2 , _UpperCAmelCase: Dict=2.0 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: str=0.0 , _UpperCAmelCase: int=0.0 , _UpperCAmelCase: str=0.1 , _UpperCAmelCase: Dict="gelu" , _UpperCAmelCase: Optional[Any]=False , _UpperCAmelCase: Union[str, Any]=True , _UpperCAmelCase: Union[str, Any]=0.0_2 , _UpperCAmelCase: Optional[int]=1e-5 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: Optional[Any]=None , _UpperCAmelCase: Tuple=True , _UpperCAmelCase: str=10 , _UpperCAmelCase: int=8 , _UpperCAmelCase: List[Any]=["stage1", "stage2"] , _UpperCAmelCase: List[Any]=[1, 2] , ):
_lowerCAmelCase :Optional[int] = parent
_lowerCAmelCase :Dict = batch_size
_lowerCAmelCase :Optional[Any] = image_size
_lowerCAmelCase :Optional[Any] = patch_size
_lowerCAmelCase :List[Any] = num_channels
_lowerCAmelCase :Optional[int] = embed_dim
_lowerCAmelCase :List[str] = hidden_sizes
_lowerCAmelCase :Union[str, Any] = depths
_lowerCAmelCase :int = num_heads
_lowerCAmelCase :Any = window_size
_lowerCAmelCase :List[Any] = mlp_ratio
_lowerCAmelCase :Optional[int] = qkv_bias
_lowerCAmelCase :Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase :Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase :Dict = drop_path_rate
_lowerCAmelCase :List[Any] = hidden_act
_lowerCAmelCase :Tuple = use_absolute_embeddings
_lowerCAmelCase :Optional[int] = patch_norm
_lowerCAmelCase :Optional[Any] = layer_norm_eps
_lowerCAmelCase :Union[str, Any] = initializer_range
_lowerCAmelCase :List[str] = is_training
_lowerCAmelCase :str = scope
_lowerCAmelCase :Optional[int] = use_labels
_lowerCAmelCase :List[Any] = type_sequence_label_size
_lowerCAmelCase :Union[str, Any] = encoder_stride
_lowerCAmelCase :Optional[int] = out_features
_lowerCAmelCase :List[str] = out_indices
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase :Dict = None
if self.use_labels:
_lowerCAmelCase :List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase :str = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self: int ):
return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple ):
_lowerCAmelCase :List[Any] = FocalNetModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCAmelCase :List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Union[str, Any] = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCAmelCase :Optional[int] = None
_lowerCAmelCase :Dict = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Any = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Any = FocalNetForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCAmelCase :List[Any] = 1
_lowerCAmelCase :List[Any] = FocalNetForMaskedImageModeling(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :int = model(_UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int , _UpperCAmelCase: Dict , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = self.type_sequence_label_size
_lowerCAmelCase :Dict = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase :Optional[int] = 1
_lowerCAmelCase :Tuple = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ (snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase : Optional[Any] = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Tuple = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Any = False
lowerCamelCase : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = FocalNetModelTester(self )
_lowerCAmelCase :str = ConfigTester(self , config_class=_UpperCAmelCase , embed_dim=37 , has_text_modality=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
return
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def SCREAMING_SNAKE_CASE__ ( self: str ):
pass
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[Any] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase :Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase , _lowerCAmelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Tuple = model_class(_UpperCAmelCase )
_lowerCAmelCase :Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase :int = [*signature.parameters.keys()]
_lowerCAmelCase :List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: int , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase :Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
_lowerCAmelCase :List[Any] = outputs.hidden_states
_lowerCAmelCase :str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# FocalNet has a different seq_length
_lowerCAmelCase :Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_lowerCAmelCase :List[str] = outputs.reshaped_hidden_states
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :int = reshaped_hidden_states[0].shape
_lowerCAmelCase :Optional[int] = (
reshaped_hidden_states[0].view(_UpperCAmelCase , _UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[int] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Dict = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase , _lowerCAmelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :str = 3
_lowerCAmelCase :Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCAmelCase :int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCAmelCase :Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :List[str] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Union[str, Any] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
@slow
def SCREAMING_SNAKE_CASE__ ( self: int ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase :List[Any] = FocalNetModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :Optional[int] = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
_lowerCAmelCase :str = model_class(config=_UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Tuple = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.default_image_processor
_lowerCAmelCase :Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCAmelCase :Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase :Dict = model(**_UpperCAmelCase )
# verify the logits
_lowerCAmelCase :str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
_lowerCAmelCase :Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : int = (FocalNetBackbone,) if is_torch_available() else ()
lowerCamelCase : str = FocalNetConfig
lowerCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Any = FocalNetModelTester(self ) | 687 | 1 |
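# Standalone sketch of the slow integration check above (checkpoint name,
# fixture path, and expected class index are taken from the test itself):
from PIL import Image
import torch
from transformers import AutoImageProcessor, FocalNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(dim=-1).item())  # 281 ("tabby cat" in ImageNet-1k)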
def binomial_coefficient( n : int , k : int ):
    """simple docstring"""
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k ):
        result *= n - i
        result //= i + 1
    return result
def catalan_number( node_count : int ):
    """simple docstring"""
    return binomial_coefficient(2 * node_count , node_count ) // (node_count + 1)
def factorial( n : int ):
    """simple docstring"""
    if n < 0:
        raise ValueError('factorial() not defined for negative values' )
    result = 1
    for i in range(1 , n + 1 ):
        result *= i
    return result
def binary_tree_count( node_count : int ):
    """simple docstring"""
    return catalan_number(node_count ) * factorial(node_count )
if __name__ == "__main__":
    node_count = int(input("""Enter the number of nodes: """).strip() or 0)
if node_count <= 0:
raise ValueError("""We need some nodes to work with.""")
print(
F'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
F'''binary trees and {catalan_number(node_count)} binary search trees.'''
) | 687 |
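# Worked example: for 5 nodes, C(10, 5) = 252, so catalan(5) = 252 // 6 = 42
# binary search trees, and 42 * 5! = 5040 (labelled) binary trees.
assert binomial_coefficient(10, 5) == 252
assert catalan_number(5) == 42
assert binary_tree_count(5) == 5040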
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
a = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
a = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
a = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
a = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
a = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
a = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
a = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
a = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
a = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
a = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
a = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
a = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
a = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
a = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
a = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
models = api.list_models(filter="""diffusers""")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
a = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1]
print(F'''Started running {mod.modelId}!!!''')
    if mod.modelId.startswith("""CompVis"""):
        model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="""unet""")
    else:
        model = UNet2DModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
    noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
    time_step = torch.tensor([10] * noise.shape[0])
    with torch.no_grad():
        logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3
)
print(F'''{mod.modelId} has passed successfully!!!''') | 687 | 1 |
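# The loop above is a regression check: a fixed seed gives fixed noise, and the
# first 30 logits of slice (0, 0, 0) must match a stored reference within 1e-3.
# A minimal single-model version (checkpoint name illustrative):
import torch
from diffusers import UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
torch.manual_seed(0)
sample_noise = torch.randn(
    1, unet.config.in_channels, unet.config.sample_size, unet.config.sample_size
)
with torch.no_grad():
    out = unet(sample_noise, torch.tensor([10])).sample
print(out[0, 0, 0, :5])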
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_trajectory_transformer""": [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TrajectoryTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_trajectory_transformer"""] = [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrajectoryTransformerModel""",
"""TrajectoryTransformerPreTrainedModel""",
"""load_tf_weights_in_trajectory_transformer""",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Optional[int] = 10
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :str = [1, 2, 3, 4]
_lowerCAmelCase :Union[str, Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
_lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
_lowerCAmelCase :Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :List[str] = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
_lowerCAmelCase , _lowerCAmelCase :Optional[Any] = process_story(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [] )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Optional[int] = ''
_lowerCAmelCase , _lowerCAmelCase :str = process_story(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [] )
self.assertEqual(_UpperCAmelCase , [] )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Optional[Any] = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
_lowerCAmelCase , _lowerCAmelCase :Optional[int] = process_story(_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Optional[int] = ['It was the best of times.']
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :Union[str, Any] = torch.tensor([1, 2, 3, 4] )
_lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 0 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :List[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
_lowerCAmelCase :Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 23 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 1 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :List[str] = 101
_lowerCAmelCase :Dict = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
_lowerCAmelCase :int = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_lowerCAmelCase :List[str] = compute_token_type_ids(_UpperCAmelCase , _UpperCAmelCase )
np.testing.assert_array_equal(_UpperCAmelCase , _UpperCAmelCase ) | 687 | 1 |
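# Sketch of the helpers the tests above exercise (the flat import path for
# utils_summarization is an assumption; it is an examples-local module):
# pad or truncate a sequence to a block size, then mask the padding positions.
import torch
from utils_summarization import build_mask, truncate_or_pad

padded = truncate_or_pad([1, 2, 3, 4], 10, 0)
print(padded)                               # [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
print(build_mask(torch.tensor(padded), 0))  # tensor([1, 1, 1, 1, 0, ..., 0])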
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : List[Any] = 'conditional_detr'
lowerCamelCase : Union[str, Any] = ['past_key_values']
lowerCamelCase : Union[str, Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self: int , _UpperCAmelCase: Dict=True , _UpperCAmelCase: Optional[int]=None , _UpperCAmelCase: Any=3 , _UpperCAmelCase: str=300 , _UpperCAmelCase: int=6 , _UpperCAmelCase: Dict=2048 , _UpperCAmelCase: List[Any]=8 , _UpperCAmelCase: List[Any]=6 , _UpperCAmelCase: int=2048 , _UpperCAmelCase: Optional[Any]=8 , _UpperCAmelCase: Tuple=0.0 , _UpperCAmelCase: Tuple=0.0 , _UpperCAmelCase: List[str]=True , _UpperCAmelCase: Optional[Any]="relu" , _UpperCAmelCase: List[str]=256 , _UpperCAmelCase: Tuple=0.1 , _UpperCAmelCase: Optional[int]=0.0 , _UpperCAmelCase: Tuple=0.0 , _UpperCAmelCase: int=0.0_2 , _UpperCAmelCase: Union[str, Any]=1.0 , _UpperCAmelCase: Optional[Any]=False , _UpperCAmelCase: str="sine" , _UpperCAmelCase: Dict="resnet50" , _UpperCAmelCase: List[str]=True , _UpperCAmelCase: Union[str, Any]=False , _UpperCAmelCase: str=2 , _UpperCAmelCase: Optional[int]=5 , _UpperCAmelCase: int=2 , _UpperCAmelCase: Optional[int]=1 , _UpperCAmelCase: str=1 , _UpperCAmelCase: Optional[Any]=2 , _UpperCAmelCase: Dict=5 , _UpperCAmelCase: List[Any]=2 , _UpperCAmelCase: Any=0.2_5 , **_UpperCAmelCase: Any , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
_lowerCAmelCase :str = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :Dict = backbone_config.get('model_type' )
_lowerCAmelCase :Any = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase :Dict = config_class.from_dict(_UpperCAmelCase )
_lowerCAmelCase :Tuple = use_timm_backbone
_lowerCAmelCase :Tuple = backbone_config
_lowerCAmelCase :Any = num_channels
_lowerCAmelCase :List[Any] = num_queries
_lowerCAmelCase :List[Any] = d_model
_lowerCAmelCase :Union[str, Any] = encoder_ffn_dim
_lowerCAmelCase :Union[str, Any] = encoder_layers
_lowerCAmelCase :str = encoder_attention_heads
_lowerCAmelCase :str = decoder_ffn_dim
_lowerCAmelCase :List[Any] = decoder_layers
_lowerCAmelCase :List[str] = decoder_attention_heads
_lowerCAmelCase :Dict = dropout
_lowerCAmelCase :Dict = attention_dropout
_lowerCAmelCase :Optional[Any] = activation_dropout
_lowerCAmelCase :Any = activation_function
_lowerCAmelCase :Union[str, Any] = init_std
_lowerCAmelCase :Tuple = init_xavier_std
_lowerCAmelCase :int = encoder_layerdrop
_lowerCAmelCase :str = decoder_layerdrop
_lowerCAmelCase :Tuple = encoder_layers
_lowerCAmelCase :Tuple = auxiliary_loss
_lowerCAmelCase :Tuple = position_embedding_type
_lowerCAmelCase :int = backbone
_lowerCAmelCase :Dict = use_pretrained_backbone
_lowerCAmelCase :List[str] = dilation
# Hungarian matcher
_lowerCAmelCase :Any = class_cost
_lowerCAmelCase :Tuple = bbox_cost
_lowerCAmelCase :Optional[int] = giou_cost
# Loss coefficients
_lowerCAmelCase :Optional[int] = mask_loss_coefficient
_lowerCAmelCase :List[str] = dice_loss_coefficient
_lowerCAmelCase :int = cls_loss_coefficient
_lowerCAmelCase :Dict = bbox_loss_coefficient
_lowerCAmelCase :int = giou_loss_coefficient
_lowerCAmelCase :List[Any] = focal_alpha
super().__init__(is_encoder_decoder=_UpperCAmelCase , **_UpperCAmelCase )
@property
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
return self.d_model
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :Dict = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
_lowerCAmelCase :Optional[int] = self.backbone_config.to_dict()
_lowerCAmelCase :List[str] = self.__class__.model_type
return output
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
return 1e-5
@property
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
return 12 | 687 |
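# Usage sketch for the ONNX config above, with the real transformers classes
# (the snippet mirrors ConditionalDetrConfig / ConditionalDetrOnnxConfig):
from transformers import ConditionalDetrConfig
from transformers.models.conditional_detr.configuration_conditional_detr import (
    ConditionalDetrOnnxConfig,
)

config = ConditionalDetrConfig()
onnx_config = ConditionalDetrOnnxConfig(config)
print(onnx_config.inputs)               # pixel_values / pixel_mask dynamic axes
print(onnx_config.atol_for_validation)  # 1e-5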
def perfect( number : int ):
    """simple docstring"""
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
    print("""Program to check whether a number is a Perfect number or not...""")
    number = int(input("""Enter number: """).strip())
    print(F'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''') | 687 | 1 |
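# Quick checks for the function above:
# 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14 are perfect, 27 is not.
assert perfect(6)
assert perfect(28)
assert not perfect(27)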
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def __init__( self: Dict , _UpperCAmelCase: Any , _UpperCAmelCase: Dict=13 , _UpperCAmelCase: Tuple=7 , _UpperCAmelCase: List[str]=True , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: Optional[Any]=True , _UpperCAmelCase: Tuple=True , _UpperCAmelCase: Optional[Any]=True , _UpperCAmelCase: List[Any]=False , _UpperCAmelCase: str=False , _UpperCAmelCase: str=False , _UpperCAmelCase: List[str]=2 , _UpperCAmelCase: int=99 , _UpperCAmelCase: Tuple=0 , _UpperCAmelCase: str=32 , _UpperCAmelCase: Any=5 , _UpperCAmelCase: Tuple=4 , _UpperCAmelCase: Optional[int]=0.1 , _UpperCAmelCase: str=0.1 , _UpperCAmelCase: Optional[Any]=512 , _UpperCAmelCase: Any=12 , _UpperCAmelCase: Union[str, Any]=2 , _UpperCAmelCase: Optional[Any]=0.0_2 , _UpperCAmelCase: Any=3 , _UpperCAmelCase: Tuple=4 , _UpperCAmelCase: List[str]="last" , _UpperCAmelCase: List[str]=None , _UpperCAmelCase: Optional[int]=None , ):
_lowerCAmelCase :List[Any] = parent
_lowerCAmelCase :Optional[Any] = batch_size
_lowerCAmelCase :Union[str, Any] = seq_length
_lowerCAmelCase :Tuple = is_training
_lowerCAmelCase :str = use_input_lengths
_lowerCAmelCase :Tuple = use_token_type_ids
_lowerCAmelCase :Any = use_labels
_lowerCAmelCase :Union[str, Any] = gelu_activation
_lowerCAmelCase :Optional[Any] = sinusoidal_embeddings
_lowerCAmelCase :List[str] = causal
_lowerCAmelCase :Any = asm
_lowerCAmelCase :Tuple = n_langs
_lowerCAmelCase :int = vocab_size
_lowerCAmelCase :List[Any] = n_special
_lowerCAmelCase :str = hidden_size
_lowerCAmelCase :List[str] = num_hidden_layers
_lowerCAmelCase :Dict = num_attention_heads
_lowerCAmelCase :Dict = hidden_dropout_prob
_lowerCAmelCase :int = attention_probs_dropout_prob
_lowerCAmelCase :Any = max_position_embeddings
_lowerCAmelCase :Dict = type_vocab_size
_lowerCAmelCase :Optional[int] = type_sequence_label_size
_lowerCAmelCase :List[str] = initializer_range
_lowerCAmelCase :Optional[int] = num_labels
_lowerCAmelCase :Tuple = num_choices
_lowerCAmelCase :List[Any] = summary_type
_lowerCAmelCase :Tuple = use_proj
_lowerCAmelCase :Union[str, Any] = scope
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase :Any = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase :Any = None
if self.use_input_lengths:
_lowerCAmelCase :Any = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_lowerCAmelCase :Optional[Any] = None
if self.use_token_type_ids:
_lowerCAmelCase :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_lowerCAmelCase :Optional[int] = None
_lowerCAmelCase :List[Any] = None
_lowerCAmelCase :Union[str, Any] = None
if self.use_labels:
_lowerCAmelCase :Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase :Tuple = ids_tensor([self.batch_size] , 2 ).float()
_lowerCAmelCase :List[str] = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase :List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: int , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: List[str] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Union[str, Any] , ):
_lowerCAmelCase :Optional[int] = FlaubertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase , lengths=_UpperCAmelCase , langs=_UpperCAmelCase )
_lowerCAmelCase :str = model(_UpperCAmelCase , langs=_UpperCAmelCase )
_lowerCAmelCase :int = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] , _UpperCAmelCase: Tuple , _UpperCAmelCase: str , _UpperCAmelCase: List[Any] , _UpperCAmelCase: List[str] , _UpperCAmelCase: int , _UpperCAmelCase: Dict , _UpperCAmelCase: List[str] , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Union[str, Any] , ):
_lowerCAmelCase :Optional[int] = FlaubertWithLMHeadModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Any = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: List[str] , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: List[str] , _UpperCAmelCase: List[str] , ):
_lowerCAmelCase :Dict = FlaubertForQuestionAnsweringSimple(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Optional[int] = model(_UpperCAmelCase )
_lowerCAmelCase :int = model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] , _UpperCAmelCase: List[Any] , _UpperCAmelCase: List[str] , _UpperCAmelCase: str , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: List[str] , _UpperCAmelCase: Dict , _UpperCAmelCase: int , _UpperCAmelCase: str , _UpperCAmelCase: List[Any] , ):
_lowerCAmelCase :List[str] = FlaubertForQuestionAnswering(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Tuple = model(_UpperCAmelCase )
_lowerCAmelCase :List[str] = model(
_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , p_mask=_UpperCAmelCase , )
_lowerCAmelCase :Optional[Any] = model(
_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , )
((_lowerCAmelCase) , ) :Optional[int] = result_with_labels.to_tuple()
_lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
((_lowerCAmelCase) , ) :List[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: str , _UpperCAmelCase: Dict , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: List[Any] , _UpperCAmelCase: int , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Dict , ):
_lowerCAmelCase :Any = FlaubertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Optional[int] = model(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: List[str] , _UpperCAmelCase: List[Any] , _UpperCAmelCase: int , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: List[Any] , _UpperCAmelCase: List[str] , _UpperCAmelCase: Tuple , ):
_lowerCAmelCase :List[str] = self.num_labels
_lowerCAmelCase :List[Any] = FlaubertForTokenClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :List[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Dict , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: List[str] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Dict , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: int , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Optional[Any] , ):
_lowerCAmelCase :List[Any] = self.num_choices
_lowerCAmelCase :int = FlaubertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase :Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase :Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase :List[str] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'lengths': input_lengths,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ (snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : int = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase : List[str] = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Dict , _UpperCAmelCase: Union[str, Any] ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE__ ( self: str , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: List[Any]=False ):
_lowerCAmelCase :Optional[int] = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
_lowerCAmelCase :int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
_lowerCAmelCase :int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
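    # For the question-answering model the generic label stubs are replaced with zero
    # tensors so the QA head receives start/end positions of the expected dtype and shape.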
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :Any = FlaubertModelTester(self )
_lowerCAmelCase :Union[str, Any] = ConfigTester(self , config_class=_UpperCAmelCase , emb_dim=37 )
def SCREAMING_SNAKE_CASE__ ( self: int ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self: str ):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase :Tuple = FlaubertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase , _lowerCAmelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
_lowerCAmelCase :int = True
_lowerCAmelCase :List[str] = model_class(config=_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = torch.jit.trace(
_UpperCAmelCase , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'traced_model.pt' ) )
_lowerCAmelCase :Optional[Any] = torch.jit.load(os.path.join(_UpperCAmelCase , 'traced_model.pt' ) , map_location=_UpperCAmelCase )
loaded(inputs_dict['input_ids'].to(_UpperCAmelCase ) , inputs_dict['attention_mask'].to(_UpperCAmelCase ) )
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Dict = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased' )
_lowerCAmelCase :Optional[Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
_lowerCAmelCase :Dict = model(_UpperCAmelCase )[0]
_lowerCAmelCase :Optional[Any] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = torch.tensor(
[[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) ) | 687 |
from __future__ import annotations
from collections.abc import MutableSequence
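# A dense polynomial over the reals: coefficients[i] holds the coefficient of x**i,
# so a degree-2 polynomial with coefficients [1, 0, 3] represents 3x^2 + 1.
# The arithmetic below is coefficient-wise; evaluation uses plain power sums.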
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: List[Any] , _UpperCAmelCase: int , _UpperCAmelCase: MutableSequence[float] ):
if len(_UpperCAmelCase ) != degree + 1:
raise ValueError(
'The number of coefficients should be equal to the degree + 1.' )
_lowerCAmelCase :list[float] = list(_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = degree
def __add__( self: str , _UpperCAmelCase: Polynomial ):
if self.degree > polynomial_a.degree:
_lowerCAmelCase :Any = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , _UpperCAmelCase )
else:
_lowerCAmelCase :List[Any] = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , _UpperCAmelCase )
def __sub__( self: str , _UpperCAmelCase: Polynomial ):
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self: Union[str, Any] ):
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self: int , _UpperCAmelCase: Polynomial ):
_lowerCAmelCase :list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: int | float ):
_lowerCAmelCase :int | float = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
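    # Worked example (a sketch): for coefficients [1, 0, 3] (i.e. 3x^2 + 1) and a
    # substitution of 2, the loop above accumulates 1*2**0 + 0*2**1 + 3*2**2 = 13.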
def __str__( self: Union[str, Any] ):
_lowerCAmelCase :Dict = ''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
                polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(i )
return polynomial
def __repr__( self: Optional[Any] ):
return self.__str__()
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :list[float] = [0] * self.degree
for i in range(self.degree ):
_lowerCAmelCase :Tuple = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , _UpperCAmelCase )
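    # The method above computes the derivative via the power rule applied term-by-term:
    # d/dx (c_i * x**i) = i * c_i * x**(i-1), dropping the constant term.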
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int | float = 0 ):
_lowerCAmelCase :list[float] = [0] * (self.degree + 2)
_lowerCAmelCase :str = constant
for i in range(self.degree + 1 ):
_lowerCAmelCase :List[str] = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , _UpperCAmelCase )
def __eq__( self: List[Any] , _UpperCAmelCase: object ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self: Optional[Any] , _UpperCAmelCase: object ):
return not self.__eq__(_UpperCAmelCase ) | 687 | 1 |
import warnings
warnings.warn(
"""memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """
"""`from accelerate import find_executable_batch_size` to avoid this warning.""",
FutureWarning,
) | 687 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
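# Lazy-import layout: `_import_structure` maps submodules to their public names; the
# heavy torch/flax modules are imported only on first attribute access through the
# `_LazyModule` proxy assigned at the bottom, and eagerly only under TYPE_CHECKING.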
a = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 | 1 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
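# The model tester below builds a tiny random config and inputs so every SqueezeBert
# head (masked LM, QA, sequence/token classification, multiple choice) can be run
# quickly on CPU; the checks assert output shapes, not values, since weights are random.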
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def __init__( self: int , _UpperCAmelCase: Dict , _UpperCAmelCase: List[str]=13 , _UpperCAmelCase: Union[str, Any]=7 , _UpperCAmelCase: Tuple=True , _UpperCAmelCase: Dict=True , _UpperCAmelCase: Any=False , _UpperCAmelCase: List[str]=True , _UpperCAmelCase: Union[str, Any]=99 , _UpperCAmelCase: str=32 , _UpperCAmelCase: Dict=5 , _UpperCAmelCase: int=4 , _UpperCAmelCase: Union[str, Any]=64 , _UpperCAmelCase: Any="gelu" , _UpperCAmelCase: Union[str, Any]=0.1 , _UpperCAmelCase: Optional[int]=0.1 , _UpperCAmelCase: str=512 , _UpperCAmelCase: List[str]=16 , _UpperCAmelCase: Tuple=2 , _UpperCAmelCase: Optional[Any]=0.0_2 , _UpperCAmelCase: Optional[int]=3 , _UpperCAmelCase: Any=4 , _UpperCAmelCase: Union[str, Any]=None , _UpperCAmelCase: Union[str, Any]=2 , _UpperCAmelCase: Tuple=2 , _UpperCAmelCase: Optional[Any]=2 , _UpperCAmelCase: List[Any]=2 , _UpperCAmelCase: List[Any]=4 , _UpperCAmelCase: str=1 , ):
_lowerCAmelCase :Tuple = parent
_lowerCAmelCase :List[Any] = batch_size
_lowerCAmelCase :List[str] = seq_length
_lowerCAmelCase :Optional[Any] = is_training
_lowerCAmelCase :List[str] = use_input_mask
_lowerCAmelCase :str = use_token_type_ids
_lowerCAmelCase :Optional[Any] = use_labels
_lowerCAmelCase :List[Any] = vocab_size
_lowerCAmelCase :int = hidden_size
_lowerCAmelCase :List[str] = num_hidden_layers
_lowerCAmelCase :Optional[int] = num_attention_heads
_lowerCAmelCase :str = intermediate_size
_lowerCAmelCase :Union[str, Any] = hidden_act
_lowerCAmelCase :Optional[Any] = hidden_dropout_prob
_lowerCAmelCase :List[str] = attention_probs_dropout_prob
_lowerCAmelCase :str = max_position_embeddings
_lowerCAmelCase :Any = type_vocab_size
_lowerCAmelCase :Optional[Any] = type_sequence_label_size
_lowerCAmelCase :Union[str, Any] = initializer_range
_lowerCAmelCase :Any = num_labels
_lowerCAmelCase :int = num_choices
_lowerCAmelCase :Union[str, Any] = scope
_lowerCAmelCase :Optional[Any] = q_groups
_lowerCAmelCase :Any = k_groups
_lowerCAmelCase :Union[str, Any] = v_groups
_lowerCAmelCase :List[Any] = post_attention_groups
_lowerCAmelCase :Tuple = intermediate_groups
_lowerCAmelCase :Dict = output_groups
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase :Optional[int] = None
if self.use_input_mask:
_lowerCAmelCase :int = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase :Dict = None
_lowerCAmelCase :List[str] = None
_lowerCAmelCase :Any = None
if self.use_labels:
_lowerCAmelCase :Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase :int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase :Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase :Union[str, Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: List[str] , _UpperCAmelCase: Dict , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Any , _UpperCAmelCase: Any ):
_lowerCAmelCase :List[Any] = SqueezeBertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Any = model(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Any = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: List[str] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Tuple ):
_lowerCAmelCase :str = SqueezeBertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self: str , _UpperCAmelCase: Any , _UpperCAmelCase: int , _UpperCAmelCase: List[Any] , _UpperCAmelCase: str , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: List[Any] ):
_lowerCAmelCase :Any = SqueezeBertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: str , _UpperCAmelCase: int , _UpperCAmelCase: List[Any] , _UpperCAmelCase: List[str] , _UpperCAmelCase: List[Any] , _UpperCAmelCase: str ):
_lowerCAmelCase :List[str] = self.num_labels
_lowerCAmelCase :List[str] = SqueezeBertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Optional[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Any , _UpperCAmelCase: int , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Dict , _UpperCAmelCase: Dict ):
_lowerCAmelCase :Any = self.num_labels
_lowerCAmelCase :Dict = SqueezeBertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: str , _UpperCAmelCase: int , _UpperCAmelCase: Any , _UpperCAmelCase: List[Any] , _UpperCAmelCase: int ):
_lowerCAmelCase :Any = self.num_choices
_lowerCAmelCase :Any = SqueezeBertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase :Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase :Dict = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Optional[Any] = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) :Union[str, Any] = config_and_inputs
_lowerCAmelCase :Tuple = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ (snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Any = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase : Optional[Any] = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase : Any = False
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : Optional[int] = False
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :Union[str, Any] = SqueezeBertModelTester(self )
_lowerCAmelCase :Any = ConfigTester(self , config_class=_UpperCAmelCase , dim=37 )
def SCREAMING_SNAKE_CASE__ ( self: int ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase :List[str] = SqueezeBertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :List[str] = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
_lowerCAmelCase :str = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
_lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase )[0]
_lowerCAmelCase :Optional[Any] = torch.Size((1, 3) )
self.assertEqual(output.shape , _UpperCAmelCase )
_lowerCAmelCase :str = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]] )
self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-4 ) ) | 687 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10):
    """simple docstring"""
    x = a
    while True:
        # One Newton-Raphson step: x <- x - f(x) / f'(x), with f'(x) obtained symbolically via sympy.
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func)))) # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision: # noqa: S307
            return float(x)
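# Newton-Raphson iterates x_{n+1} = x_n - f(x_n) / f'(x_n); convergence is roughly
# quadratic near a simple root, but a reasonable starting guess is required (hence
# the hand-picked second arguments below).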
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
    # Find root of log(x) - 1 = 0 (the root is e)
print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
    # Find root of exp(x) - 1 = 0
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''') | 687 | 1 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
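# BloomTokenizerFast is a fast-only (Rust `tokenizers`-backed) tokenizer, so there is
# no slow counterpart here; the tests cover encode/decode round-trips, padding corner
# cases, and multilingual round-trips over XNLI premises.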
@require_tokenizers
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : List[Any] = None
lowerCamelCase : Optional[Any] = BloomTokenizerFast
lowerCamelCase : int = BloomTokenizerFast
lowerCamelCase : List[str] = True
lowerCamelCase : Dict = False
lowerCamelCase : str = 'tokenizer_file'
lowerCamelCase : Optional[Any] = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
super().setUp()
_lowerCAmelCase :List[str] = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] , **_UpperCAmelCase: Tuple ):
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Dict = self.get_rust_tokenizer()
_lowerCAmelCase :Any = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
_lowerCAmelCase :Dict = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
_lowerCAmelCase :List[Any] = tokenizer.batch_encode_plus(_UpperCAmelCase )['input_ids']
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Optional[int] = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: str , _UpperCAmelCase: str=6 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowerCAmelCase :str = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
_lowerCAmelCase :Dict = 'This is a simple input'
_lowerCAmelCase :Dict = ['This is a simple input 1', 'This is a simple input 2']
_lowerCAmelCase :str = ('This is a simple input', 'This is a pair')
_lowerCAmelCase :List[str] = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(_UpperCAmelCase , max_length=_UpperCAmelCase )
tokenizer_r.encode_plus(_UpperCAmelCase , max_length=_UpperCAmelCase )
tokenizer_r.batch_encode_plus(_UpperCAmelCase , max_length=_UpperCAmelCase )
tokenizer_r.encode(_UpperCAmelCase , max_length=_UpperCAmelCase )
tokenizer_r.batch_encode_plus(_UpperCAmelCase , max_length=_UpperCAmelCase )
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding' )
_lowerCAmelCase :Dict = None # Hotfixing padding = None
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' , )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' , )
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :List[Any] = self.get_rust_tokenizer()
_lowerCAmelCase :Tuple = load_dataset('xnli' , 'all_languages' , split='test' , streaming=_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = next(iter(_UpperCAmelCase ) )['premise'] # pick up one data
_lowerCAmelCase :Union[str, Any] = list(sample_data.values() )
_lowerCAmelCase :Dict = list(map(tokenizer.encode , _UpperCAmelCase ) )
_lowerCAmelCase :Optional[int] = [tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) for x in output_tokens]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: str ):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 ) | 687 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
a = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""sample_size""": 256,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
a = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
a = {
"""num_train_timesteps""": 151,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
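# Conversion sketch: the original consistency-model checkpoints key their weights under
# `input_blocks.*`, `middle_block.*` and `output_blocks.*`; the helpers below re-key
# them into diffusers' `down_blocks`, `mid_block` and `up_blocks` layout.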
def strabool(v):
    """simple docstring"""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('boolean value expected' )
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[Any]=False ):
"""simple docstring"""
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.in_layers.0.weight"""]
_lowerCAmelCase :Union[str, Any] = checkpoint[f"""{old_prefix}.in_layers.0.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.in_layers.2.weight"""]
_lowerCAmelCase :Optional[Any] = checkpoint[f"""{old_prefix}.in_layers.2.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""]
_lowerCAmelCase :Any = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.out_layers.0.weight"""]
_lowerCAmelCase :List[Any] = checkpoint[f"""{old_prefix}.out_layers.0.bias"""]
_lowerCAmelCase :Optional[int] = checkpoint[f"""{old_prefix}.out_layers.3.weight"""]
_lowerCAmelCase :Dict = checkpoint[f"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
_lowerCAmelCase :List[Any] = checkpoint[f"""{old_prefix}.skip_connection.weight"""]
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : List[str] , __magic_name__ : List[str]=None ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Tuple = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Any = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.norm.weight"""]
_lowerCAmelCase :Dict = checkpoint[f"""{old_prefix}.norm.bias"""]
_lowerCAmelCase :Dict = weight_q.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :str = bias_q.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :List[str] = weight_k.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :Optional[Any] = bias_k.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :Tuple = weight_v.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :List[Any] = bias_v.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :int = (
checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
_lowerCAmelCase :Optional[Any] = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
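# The fused `qkv` projection is split into separate q/k/v tensors with `chunk(3, dim=0)`,
# and the trailing 1x1-conv dimensions are squeezed away so the weights fit linear layers.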
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Optional[Any] ):
"""simple docstring"""
_lowerCAmelCase :Union[str, Any] = torch.load(__magic_name__ , map_location='cpu' )
_lowerCAmelCase :List[Any] = {}
_lowerCAmelCase :List[str] = checkpoint['time_embed.0.weight']
_lowerCAmelCase :Tuple = checkpoint['time_embed.0.bias']
_lowerCAmelCase :Dict = checkpoint['time_embed.2.weight']
_lowerCAmelCase :Union[str, Any] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
_lowerCAmelCase :Union[str, Any] = checkpoint['label_emb.weight']
_lowerCAmelCase :str = checkpoint['input_blocks.0.0.weight']
_lowerCAmelCase :str = checkpoint['input_blocks.0.0.bias']
_lowerCAmelCase :List[Any] = unet_config['down_block_types']
_lowerCAmelCase :Any = unet_config['layers_per_block']
_lowerCAmelCase :List[Any] = unet_config['attention_head_dim']
_lowerCAmelCase :Tuple = unet_config['block_out_channels']
_lowerCAmelCase :List[str] = 1
_lowerCAmelCase :Optional[int] = channels_list[0]
for i, layer_type in enumerate(__magic_name__ ):
_lowerCAmelCase :Tuple = channels_list[i]
_lowerCAmelCase :Optional[Any] = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(__magic_name__ ):
_lowerCAmelCase :int = f"""down_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :List[Any] = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :int = True if j == 0 and downsample_block_has_skip else False
_lowerCAmelCase :List[Any] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(__magic_name__ ):
_lowerCAmelCase :List[str] = f"""down_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :Optional[int] = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :List[str] = True if j == 0 and downsample_block_has_skip else False
_lowerCAmelCase :Optional[int] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
_lowerCAmelCase :Optional[int] = f"""down_blocks.{i}.attentions.{j}"""
_lowerCAmelCase :str = f"""input_blocks.{current_layer}.1"""
_lowerCAmelCase :Optional[Any] = convert_attention(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Union[str, Any] = f"""down_blocks.{i}.downsamplers.0"""
_lowerCAmelCase :Tuple = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :Optional[int] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
_lowerCAmelCase :Dict = current_channels
# hardcoded the mid-block for now
_lowerCAmelCase :int = 'mid_block.resnets.0'
_lowerCAmelCase :Optional[Any] = 'middle_block.0'
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Optional[int] = 'mid_block.attentions.0'
_lowerCAmelCase :Optional[int] = 'middle_block.1'
_lowerCAmelCase :List[Any] = convert_attention(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Union[str, Any] = 'mid_block.resnets.1'
_lowerCAmelCase :Optional[int] = 'middle_block.2'
_lowerCAmelCase :int = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Tuple = 0
_lowerCAmelCase :str = unet_config['up_block_types']
for i, layer_type in enumerate(__magic_name__ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCAmelCase :Optional[Any] = f"""up_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :Dict = f"""output_blocks.{current_layer}.0"""
_lowerCAmelCase :Any = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Any = f"""up_blocks.{i}.upsamplers.0"""
_lowerCAmelCase :Dict = f"""output_blocks.{current_layer-1}.1"""
_lowerCAmelCase :Tuple = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCAmelCase :Tuple = f"""up_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :List[str] = f"""output_blocks.{current_layer}.0"""
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
_lowerCAmelCase :str = f"""up_blocks.{i}.attentions.{j}"""
_lowerCAmelCase :List[Any] = f"""output_blocks.{current_layer}.1"""
_lowerCAmelCase :int = convert_attention(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Optional[int] = f"""up_blocks.{i}.upsamplers.0"""
_lowerCAmelCase :int = f"""output_blocks.{current_layer-1}.2"""
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :str = checkpoint['out.0.weight']
_lowerCAmelCase :Union[str, Any] = checkpoint['out.0.bias']
_lowerCAmelCase :List[Any] = checkpoint['out.2.weight']
_lowerCAmelCase :Dict = checkpoint['out.2.bias']
return new_checkpoint
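# CLI flow below: load the original `.pt` checkpoint, re-key it with the helpers above,
# load the result into a diffusers `UNetaDModel`, pair it with a
# `CMStochasticIterativeScheduler` matching the checkpoint family, and save the
# assembled `ConsistencyModelPipeline` to `--dump_path`.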
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
a = parser.parse_args()
a = strabool(args.class_cond)
a = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
a = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
a = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
a = None
a = con_pt_to_diffuser(args.unet_path, unet_config)
a = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
a = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
a = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
a = CMStochasticIterativeScheduler(**scheduler_config)
a = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path) | 687 | 1 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
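# These tests exercise the summarization preprocessing helpers (CNN/DailyMail-style,
# judging by the `@highlight` markers): stories are split into (article, summary-lines)
# pairs, sequences are padded or truncated to a fixed block size, and masks / token-type
# ids are derived from pad and separator token ids.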
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Optional[int] = 10
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :str = [1, 2, 3, 4]
_lowerCAmelCase :Union[str, Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
_lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
_lowerCAmelCase :Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :List[str] = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
_lowerCAmelCase , _lowerCAmelCase :Optional[Any] = process_story(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [] )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Optional[int] = ''
_lowerCAmelCase , _lowerCAmelCase :str = process_story(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [] )
self.assertEqual(_UpperCAmelCase , [] )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Optional[Any] = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
_lowerCAmelCase , _lowerCAmelCase :Optional[int] = process_story(_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Optional[int] = ['It was the best of times.']
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :Union[str, Any] = torch.tensor([1, 2, 3, 4] )
_lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 0 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :List[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
_lowerCAmelCase :Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 23 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 1 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :List[str] = 101
_lowerCAmelCase :Dict = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
_lowerCAmelCase :int = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_lowerCAmelCase :List[str] = compute_token_type_ids(_UpperCAmelCase , _UpperCAmelCase )
np.testing.assert_array_equal(_UpperCAmelCase , _UpperCAmelCase ) | 687 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
a = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Optional[Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
_lowerCAmelCase :Tuple = self.diffusers_dir
shutil.copy(
os.path.join(_UpperCAmelCase , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :str = 'src/diffusers'
shutil.rmtree(self.diffusers_dir )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=None ):
_lowerCAmelCase :int = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_lowerCAmelCase :Dict = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_lowerCAmelCase :Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_lowerCAmelCase :List[str] = black.format_str(_UpperCAmelCase , mode=_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = os.path.join(self.diffusers_dir , 'new_code.py' )
with open(_UpperCAmelCase , 'w' , newline='\n' ) as f:
f.write(_UpperCAmelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_UpperCAmelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_UpperCAmelCase )
with open(_UpperCAmelCase , 'r' ) as f:
self.assertTrue(f.read() , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :List[str] = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
# Base copy consistency
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , _UpperCAmelCase , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , _UpperCAmelCase ) , )
# Copy consistency with a really long name
_lowerCAmelCase :Optional[int] = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , _UpperCAmelCase , _UpperCAmelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , _UpperCAmelCase , overwrite_result=re.sub('DDPM' , 'Test' , _UpperCAmelCase ) , ) | 687 | 1 |
import os
import sys
import unittest
a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
a = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
a = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
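# These assertions pin the expected test <-> tester <-> model wiring for BERT and BLIP;
# `get_test_info.to_json` appears to render the extracted mappings in a stable form so
# dict ordering does not affect the comparison (an assumption about its behaviour).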
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :Optional[Any] = get_test_to_tester_mapping(_UpperCAmelCase )
_lowerCAmelCase :List[str] = get_test_to_tester_mapping(_UpperCAmelCase )
_lowerCAmelCase :Any = {'BertModelTest': 'BertModelTester'}
_lowerCAmelCase :str = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Union[str, Any] = get_model_to_test_mapping(_UpperCAmelCase )
_lowerCAmelCase :int = get_model_to_test_mapping(_UpperCAmelCase )
_lowerCAmelCase :int = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
_lowerCAmelCase :Any = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :Dict = get_model_to_tester_mapping(_UpperCAmelCase )
_lowerCAmelCase :int = get_model_to_tester_mapping(_UpperCAmelCase )
_lowerCAmelCase :int = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
_lowerCAmelCase :List[Any] = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) | 687 |
from dataclasses import dataclass, field
from typing import Optional
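# Each dataclass below is one argument group for a codeparrot script (training,
# evaluation, human-eval, preprocessing, tokenizer training, pretokenization, model
# initialization); `field(default=..., metadata={'help': ...})` carries the CLI default
# and help text, presumably consumed by an HfArgumentParser-style parser (an
# assumption; no parser is imported here).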
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} )
lowerCamelCase : Optional[str] = field(
default='./' , metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for training.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for evaluation.'} )
lowerCamelCase : Optional[float] = field(default=0.1 , metadata={'help': 'Value of weight decay.'} )
lowerCamelCase : Optional[int] = field(
default=1_00_00 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
    lowerCamelCase : Optional[float] = field(default=2e-4 , metadata={'help': 'Learning rate for training.'} )
    lowerCamelCase : Optional[str] = field(default='cosine' , metadata={'help': 'Learning rate scheduler type.'} )
lowerCamelCase : Optional[int] = field(
default=7_50 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
lowerCamelCase : Optional[int] = field(
default=16 , metadata={'help': 'Number of gradient accumulation steps.'} )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
lowerCamelCase : Optional[int] = field(default=5_00_00 , metadata={'help': 'Maximum number of training steps.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Sequence lengths used for training.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Training seed.'} )
lowerCamelCase : Optional[int] = field(
default=10_24 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} , )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Length of sequences to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for code evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Sample from the language model\'s output distribution.'} )
lowerCamelCase : Optional[float] = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} )
lowerCamelCase : Optional[int] = field(default=2_56 , metadata={'help': 'Maximum number of newly generated tokens.'} )
lowerCamelCase : Optional[int] = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} )
lowerCamelCase : Optional[float] = field(default=0.95 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
lowerCamelCase : Optional[int] = field(default=10 , metadata={'help': 'Number of generations to run in parallel.'} )
lowerCamelCase : Optional[int] = field(
default=2_00 , metadata={'help': 'Number of completions to generate for each sample.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
    lowerCamelCase : Optional[str] = field(
        default='eval_results.json' , metadata={'help': 'Path of the file where the evaluation results are saved.'} )
lowerCamelCase : Optional[str] = field(
default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} , )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} , )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} )
lowerCamelCase : Optional[str] = field(
        default='codeparrot-clean' , metadata={'help': 'Folder to save the processed dataset.'} )
lowerCamelCase : Optional[int] = field(
default=10_00_00 , metadata={'help': 'Number of files to save per JSON output file.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[float] = field(
default=10_00 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1_00 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.25 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1.5 , metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'If True, near-duplicate samples are removed.'} )
lowerCamelCase : Optional[float] = field(
default=0.85 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[int] = field(default=20_00_00 , metadata={'help': 'Number of examples to train tokenizer on.'} )
    lowerCamelCase : Optional[int] = field(
        default=3_27_68 , metadata={'help': 'Vocabulary size of the new tokenizer.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} )
lowerCamelCase : Optional[str] = field(
default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} )
lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for code evaluation.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} )
    lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} )
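# --- Editor's sketch (not part of the original script; assumes transformers is installed) ---
# The dataclasses above follow the HfArgumentParser pattern used by the CodeParrot
# scripts: each `field(...)` becomes a CLI flag. A runnable miniature, with a
# hypothetical `DemoArguments` standing in for the real argument classes:
from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser


@dataclass
class DemoArguments:
    model_ckpt: Optional[str] = field(
        default='codeparrot/codeparrot' , metadata={'help': 'Model name or path.'} )
    batch_size: Optional[int] = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} )


if __name__ == "__main__":
    parser = HfArgumentParser(DemoArguments)
    (args,) = parser.parse_args_into_dataclasses()  # e.g. `python demo.py --batch_size 8`
    print(args)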
def join( separator : str , separated : list[str] ):
    """simple docstring"""
    joined = ''
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str ):
            raise Exception('join() accepts only strings to be joined' )
        joined += word_or_phrase + separator
    return joined.strip(separator )
if __name__ == "__main__":
from doctest import testmod
    testmod()
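# --- Editor's usage sketch (not part of the original file) ---
# The join helper above in action; the trailing separator is stripped:
assert join(' ', ['You', 'are', 'amazing!']) == 'You are amazing!'
assert join('#', ['a', 'b', 'c']) == 'a#b#c'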
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :List[str] = 'ylacombe/bark-small'
_lowerCAmelCase :int = tempfile.mkdtemp()
_lowerCAmelCase :List[str] = 'en_speaker_1'
_lowerCAmelCase :Union[str, Any] = 'This is a test string'
_lowerCAmelCase :List[Any] = 'speaker_embeddings_path.json'
_lowerCAmelCase :str = 'speaker_embeddings'
def SCREAMING_SNAKE_CASE__ ( self: str , **_UpperCAmelCase: Optional[Any] ):
return AutoTokenizer.from_pretrained(self.checkpoint , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :List[Any] = self.get_tokenizer()
_lowerCAmelCase :List[str] = BarkProcessor(tokenizer=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase :List[str] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_lowerCAmelCase :Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_lowerCAmelCase :Any = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_lowerCAmelCase :List[Any] = 35
_lowerCAmelCase :Optional[int] = 2
_lowerCAmelCase :Dict = 8
_lowerCAmelCase :Dict = {
'semantic_prompt': np.ones(_UpperCAmelCase ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_lowerCAmelCase :int = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_lowerCAmelCase :Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Tuple = self.get_tokenizer()
_lowerCAmelCase :Union[str, Any] = BarkProcessor(tokenizer=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = processor(text=self.input_string )
_lowerCAmelCase :List[str] = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , )
for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
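# --- Editor's sketch (not part of the original tests; assumes transformers is
# installed and the checkpoint is reachable) ---
# The core pattern the tests above exercise is a save/reload round-trip:
import tempfile

from transformers import AutoTokenizer

with tempfile.TemporaryDirectory() as tmpdir:
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    tokenizer.save_pretrained(tmpdir)
    reloaded = AutoTokenizer.from_pretrained(tmpdir)
    assert tokenizer.get_vocab() == reloaded.get_vocab()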
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data( product : str = "laptop" ):
"""simple docstring"""
    url = f"""https://www.amazon.in/laptop/s?k={product}"""
    header = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
        'Accept-Language': 'en-US, en;q=0.5',
    }
    soup = BeautifulSoup(requests.get(url , headers=header ).text )
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
            product_title = item.h2.text
            product_link = 'https://www.amazon.in/' + item.h2.a['href']
            product_price = item.find('span' , attrs={'class': 'a-offscreen'} ).text
            try:
                product_rating = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
            except AttributeError:
                product_rating = 'Not available'
            try:
                product_mrp = (
                    '₹'
                    + item.find(
                        'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
                )
            except AttributeError:
                product_mrp = ''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('₹' ).replace(',' , '' ) )
                            - float(product_price.strip('₹' ).replace(',' , '' ) )
                        )
                        / float(product_mrp.strip('₹' ).replace(',' , '' ) )
                    )
                    * 100 )
            except ValueError:
                discount = float('nan' )
except AttributeError:
pass
        data_frame.loc[len(data_frame.index )] = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCAmelCase :Any = ' '
_lowerCAmelCase :Tuple = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
a = """headphones"""
get_amazon_product_data(product).to_csv(F'''Amazon Product Data for {product}.csv''') | 687 |
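# --- Editor's sketch (not part of the original script) ---
# The scraper above is requests -> BeautifulSoup -> DataFrame. The same
# skeleton against static HTML, so it runs without hitting amazon.in:
from bs4 import BeautifulSoup
from pandas import DataFrame

html = """
<div class="s-result-item"><h2><a href="/item1">Laptop A</a></h2></div>
<div class="s-result-item"><h2><a href="/item2">Laptop B</a></h2></div>
"""
soup = BeautifulSoup(html, "html.parser")
rows = []
for item in soup.find_all("div", attrs={"class": "s-result-item"}):
    rows.append(
        {"Product Title": item.h2.text, "Product Link": "https://www.amazon.in" + item.h2.a["href"]}
    )
print(DataFrame(rows))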
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : int = 'bert'
def __init__( self: Optional[Any] , _UpperCAmelCase: Tuple=3_0522 , _UpperCAmelCase: int=768 , _UpperCAmelCase: Union[str, Any]=12 , _UpperCAmelCase: Dict=12 , _UpperCAmelCase: List[Any]=3072 , _UpperCAmelCase: List[Any]="gelu" , _UpperCAmelCase: Union[str, Any]=0.1 , _UpperCAmelCase: Dict=0.1 , _UpperCAmelCase: List[Any]=512 , _UpperCAmelCase: Optional[Any]=2 , _UpperCAmelCase: Optional[int]=0.0_2 , _UpperCAmelCase: Any=1e-1_2 , _UpperCAmelCase: Optional[Any]=0 , _UpperCAmelCase: Union[str, Any]="absolute" , _UpperCAmelCase: Dict=True , _UpperCAmelCase: Optional[Any]=None , **_UpperCAmelCase: Optional[int] , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :List[Any] = vocab_size
_lowerCAmelCase :Tuple = hidden_size
_lowerCAmelCase :Dict = num_hidden_layers
_lowerCAmelCase :Optional[Any] = num_attention_heads
_lowerCAmelCase :List[Any] = hidden_act
_lowerCAmelCase :int = intermediate_size
_lowerCAmelCase :Tuple = hidden_dropout_prob
_lowerCAmelCase :Tuple = attention_probs_dropout_prob
_lowerCAmelCase :List[Any] = max_position_embeddings
_lowerCAmelCase :Dict = type_vocab_size
_lowerCAmelCase :Any = initializer_range
_lowerCAmelCase :int = layer_norm_eps
_lowerCAmelCase :List[Any] = position_embedding_type
_lowerCAmelCase :int = use_cache
_lowerCAmelCase :Union[str, Any] = classifier_dropout
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
if self.task == "multiple-choice":
_lowerCAmelCase :List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase :Any = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
            ] )
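# --- Editor's sketch (not part of the original module; assumes transformers
# and torch are installed) ---
# How a config class like the one above is typically consumed:
from transformers import BertConfig, BertModel

config = BertConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=2, intermediate_size=256)
model = BertModel(config)  # randomly initialized, deliberately tiny
print(model.config.hidden_size)  # 128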
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
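# --- Editor's note (illustration, not part of the original module) ---
# The file above follows transformers' lazy-import pattern: `_import_structure`
# maps submodule names to their exported symbols, and `sys.modules[__name__]`
# is swapped for a `_LazyModule` that imports a submodule only when one of its
# symbols is first accessed. A minimal standalone imitation of the idea:
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)


# hypothetical usage: `json` is only imported when `dumps` is first touched
lazy = LazyModule("demo", {"json": ["dumps", "loads"]})
print(lazy.dumps({"ok": True}))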
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ):
"""simple docstring"""
if isinstance(__magic_name__ , torch.Tensor ):
return image
elif isinstance(__magic_name__ , PIL.Image.Image ):
_lowerCAmelCase :Tuple = [image]
if isinstance(image[0] , PIL.Image.Image ):
_lowerCAmelCase :List[Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
_lowerCAmelCase :Optional[Any] = np.concatenate(__magic_name__ , axis=0 )
_lowerCAmelCase :Any = np.array(__magic_name__ ).astype(np.floataa ) / 255.0
_lowerCAmelCase :Optional[int] = image.transpose(0 , 3 , 1 , 2 )
_lowerCAmelCase :int = 2.0 * image - 1.0
_lowerCAmelCase :Optional[int] = torch.from_numpy(__magic_name__ )
elif isinstance(image[0] , torch.Tensor ):
_lowerCAmelCase :str = torch.cat(__magic_name__ , dim=0 )
return image
def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : int=0.9995 ):
"""simple docstring"""
if not isinstance(__magic_name__ , np.ndarray ):
_lowerCAmelCase :Tuple = True
_lowerCAmelCase :str = va.device
_lowerCAmelCase :List[str] = va.cpu().numpy()
_lowerCAmelCase :List[str] = va.cpu().numpy()
_lowerCAmelCase :Any = np.sum(va * va / (np.linalg.norm(__magic_name__ ) * np.linalg.norm(__magic_name__ )) )
if np.abs(__magic_name__ ) > DOT_THRESHOLD:
_lowerCAmelCase :Optional[Any] = (1 - t) * va + t * va
else:
_lowerCAmelCase :int = np.arccos(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = np.sin(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = theta_a * t
_lowerCAmelCase :str = np.sin(__magic_name__ )
_lowerCAmelCase :Any = np.sin(theta_a - theta_t ) / sin_theta_a
_lowerCAmelCase :Optional[Any] = sin_theta_t / sin_theta_a
_lowerCAmelCase :List[Any] = sa * va + sa * va
if inputs_are_torch:
_lowerCAmelCase :int = torch.from_numpy(__magic_name__ ).to(__magic_name__ )
return va
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase :Any = F.normalize(__magic_name__ , dim=-1 )
_lowerCAmelCase :str = F.normalize(__magic_name__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
for param in model.parameters():
_lowerCAmelCase :List[str] = value
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def __init__( self: Any , _UpperCAmelCase: AutoencoderKL , _UpperCAmelCase: CLIPTextModel , _UpperCAmelCase: CLIPModel , _UpperCAmelCase: CLIPTokenizer , _UpperCAmelCase: UNetaDConditionModel , _UpperCAmelCase: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , _UpperCAmelCase: CLIPFeatureExtractor , _UpperCAmelCase: str=None , _UpperCAmelCase: Tuple=None , _UpperCAmelCase: Union[str, Any]=None , ):
super().__init__()
self.register_modules(
vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , clip_model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , coca_model=_UpperCAmelCase , coca_tokenizer=_UpperCAmelCase , coca_transform=_UpperCAmelCase , )
_lowerCAmelCase :int = (
feature_extractor.size
if isinstance(feature_extractor.size , _UpperCAmelCase )
else feature_extractor.size['shortest_edge']
)
_lowerCAmelCase :Union[str, Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _UpperCAmelCase )
set_requires_grad(self.clip_model , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase :Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
self.enable_attention_slicing(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
set_requires_grad(self.vae , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
set_requires_grad(self.vae , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
set_requires_grad(self.unet , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
set_requires_grad(self.unet , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Dict ):
# get the original timestep using init_timestep
_lowerCAmelCase :Optional[Any] = min(int(num_inference_steps * strength ) , _UpperCAmelCase )
_lowerCAmelCase :List[str] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase :Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Union[str, Any]=None ):
if not isinstance(_UpperCAmelCase , torch.Tensor ):
raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(_UpperCAmelCase )}""" )
_lowerCAmelCase :Union[str, Any] = image.to(device=_UpperCAmelCase , dtype=_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :List[Any] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCAmelCase )
]
_lowerCAmelCase :List[str] = torch.cat(_UpperCAmelCase , dim=0 )
else:
_lowerCAmelCase :List[str] = self.vae.encode(_UpperCAmelCase ).latent_dist.sample(_UpperCAmelCase )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_lowerCAmelCase :List[Any] = 0.1_8_2_1_5 * init_latents
_lowerCAmelCase :List[Any] = init_latents.repeat_interleave(_UpperCAmelCase , dim=0 )
_lowerCAmelCase :Dict = randn_tensor(init_latents.shape , generator=_UpperCAmelCase , device=_UpperCAmelCase , dtype=_UpperCAmelCase )
# get latents
_lowerCAmelCase :Dict = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :List[str] = init_latents
return latents
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Union[str, Any] ):
_lowerCAmelCase :Optional[int] = self.coca_transform(_UpperCAmelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_lowerCAmelCase :Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_lowerCAmelCase :int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: List[str] ):
_lowerCAmelCase :Optional[int] = self.feature_extractor.preprocess(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
_lowerCAmelCase :List[str] = self.clip_model.get_image_features(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase )
_lowerCAmelCase :Dict = image_embeddings_clip.repeat_interleave(_UpperCAmelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , _UpperCAmelCase: Dict , _UpperCAmelCase: str , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , ):
_lowerCAmelCase :Dict = latents.detach().requires_grad_()
_lowerCAmelCase :Optional[Any] = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
_lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_lowerCAmelCase :int = self.scheduler.alphas_cumprod[timestep]
_lowerCAmelCase :Optional[int] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase :str = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_lowerCAmelCase :Optional[Any] = torch.sqrt(_UpperCAmelCase )
_lowerCAmelCase :List[str] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _UpperCAmelCase ):
_lowerCAmelCase :Dict = self.scheduler.sigmas[index]
_lowerCAmelCase :Optional[Any] = latents - sigma * noise_pred
else:
raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_lowerCAmelCase :Tuple = 1 / 0.1_8_2_1_5 * sample
_lowerCAmelCase :Optional[Any] = self.vae.decode(_UpperCAmelCase ).sample
_lowerCAmelCase :List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase :Tuple = transforms.Resize(self.feature_extractor_size )(_UpperCAmelCase )
_lowerCAmelCase :Tuple = self.normalize(_UpperCAmelCase ).to(latents.dtype )
_lowerCAmelCase :List[Any] = self.clip_model.get_image_features(_UpperCAmelCase )
_lowerCAmelCase :List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase )
_lowerCAmelCase :Tuple = spherical_dist_loss(_UpperCAmelCase , _UpperCAmelCase ).mean() * clip_guidance_scale
_lowerCAmelCase :str = -torch.autograd.grad(_UpperCAmelCase , _UpperCAmelCase )[0]
if isinstance(self.scheduler , _UpperCAmelCase ):
_lowerCAmelCase :Union[str, Any] = latents.detach() + grads * (sigma**2)
_lowerCAmelCase :Dict = noise_pred_original
else:
_lowerCAmelCase :Optional[int] = noise_pred_original - torch.sqrt(_UpperCAmelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self: Optional[int] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: float = 0.6 , _UpperCAmelCase: Optional[int] = 50 , _UpperCAmelCase: Optional[float] = 7.5 , _UpperCAmelCase: Optional[int] = 1 , _UpperCAmelCase: float = 0.0 , _UpperCAmelCase: Optional[float] = 100 , _UpperCAmelCase: Optional[torch.Generator] = None , _UpperCAmelCase: Optional[str] = "pil" , _UpperCAmelCase: bool = True , _UpperCAmelCase: float = 0.8 , _UpperCAmelCase: float = 0.1 , _UpperCAmelCase: float = 0.1 , ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(_UpperCAmelCase )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(_UpperCAmelCase , torch.Generator ) and batch_size > 1:
_lowerCAmelCase :int = [generator] + [None] * (batch_size - 1)
_lowerCAmelCase :List[Any] = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
_lowerCAmelCase :Optional[int] = [x[0] for x in coca_is_none if x[1]]
_lowerCAmelCase :List[str] = ', '.join(_UpperCAmelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :List[Any] = self.get_image_description(_UpperCAmelCase )
if style_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :Any = self.get_image_description(_UpperCAmelCase )
# get prompt text embeddings for content and style
_lowerCAmelCase :Any = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :str = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :int = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :Union[str, Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :List[str] = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# duplicate text embeddings for each generation per prompt
_lowerCAmelCase :str = text_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 )
# set timesteps
_lowerCAmelCase :Any = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_lowerCAmelCase :Dict = {}
if accepts_offset:
_lowerCAmelCase :Optional[int] = 1
self.scheduler.set_timesteps(_UpperCAmelCase , **_UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_lowerCAmelCase , _lowerCAmelCase :List[str] = self.get_timesteps(_UpperCAmelCase , _UpperCAmelCase , self.device )
_lowerCAmelCase :int = timesteps[:1].repeat(_UpperCAmelCase )
# Preprocess image
_lowerCAmelCase :Dict = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :int = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :Any = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :str = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if clip_guidance_scale > 0:
_lowerCAmelCase :Optional[Any] = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Dict = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Any = slerp(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase :int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase :Optional[int] = content_text_input.input_ids.shape[-1]
_lowerCAmelCase :Union[str, Any] = self.tokenizer([''] , padding='max_length' , max_length=_UpperCAmelCase , return_tensors='pt' )
_lowerCAmelCase :Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_lowerCAmelCase :Optional[int] = uncond_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase :int = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase :Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase :Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowerCAmelCase :Any = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device='cpu' , dtype=_UpperCAmelCase ).to(
self.device )
else:
_lowerCAmelCase :List[Any] = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
_lowerCAmelCase :int = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase :Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase :Any = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase :Any = {}
if accepts_eta:
_lowerCAmelCase :Any = eta
# check if the scheduler accepts generator
_lowerCAmelCase :List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_lowerCAmelCase :List[Any] = generator
with self.progress_bar(total=_UpperCAmelCase ):
for i, t in enumerate(_UpperCAmelCase ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase :Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase :Tuple = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
_lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase :List[str] = noise_pred.chunk(2 )
_lowerCAmelCase :Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_lowerCAmelCase :List[Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_lowerCAmelCase , _lowerCAmelCase :List[str] = self.cond_fn(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase :str = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_lowerCAmelCase :str = 1 / 0.1_8_2_1_5 * latents
_lowerCAmelCase :Any = self.vae.decode(_UpperCAmelCase ).sample
_lowerCAmelCase :List[str] = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase :Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCAmelCase :List[Any] = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image, None)
        return StableDiffusionPipelineOutput(images=_UpperCAmelCase , nsfw_content_detected=_UpperCAmelCase )
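# --- Editor's sketch (not part of the original pipeline) ---
# The pipeline above relies on spherical linear interpolation (the helper near
# the top of the file, whose local variable names were mangled). A clean NumPy
# reference version of the same idea:
import numpy as np


def slerp_reference(t: float, v0: np.ndarray, v1: np.ndarray, dot_threshold: float = 0.9995) -> np.ndarray:
    """Spherically interpolate between v0 and v1 by fraction t in [0, 1]."""
    dot = np.sum(v0 * v1) / (np.linalg.norm(v0) * np.linalg.norm(v1))
    if np.abs(dot) > dot_threshold:
        # nearly colinear vectors: plain lerp is numerically safer
        return (1 - t) * v0 + t * v1
    theta_0 = np.arccos(dot)
    theta_t = theta_0 * t
    s0 = np.sin(theta_0 - theta_t) / np.sin(theta_0)
    s1 = np.sin(theta_t) / np.sin(theta_0)
    return s0 * v0 + s1 * v1


assert np.allclose(slerp_reference(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0])), [2 ** -0.5, 2 ** -0.5])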
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Any = ['pixel_values']
def __init__( self: Optional[int] , _UpperCAmelCase: bool = True , _UpperCAmelCase: Union[int, float] = 1 / 255 , _UpperCAmelCase: bool = True , _UpperCAmelCase: int = 8 , **_UpperCAmelCase: List[Any] , ):
super().__init__(**_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = do_rescale
_lowerCAmelCase :Tuple = rescale_factor
_lowerCAmelCase :int = do_pad
_lowerCAmelCase :int = pad_size
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: np.ndarray , _UpperCAmelCase: float , _UpperCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase: List[str] ):
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: np.ndarray , _UpperCAmelCase: int , _UpperCAmelCase: Optional[Union[str, ChannelDimension]] = None ):
_lowerCAmelCase , _lowerCAmelCase :int = get_image_size(_UpperCAmelCase )
_lowerCAmelCase :Tuple = (old_height // size + 1) * size - old_height
_lowerCAmelCase :List[Any] = (old_width // size + 1) * size - old_width
return pad(_UpperCAmelCase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: ImageInput , _UpperCAmelCase: Optional[bool] = None , _UpperCAmelCase: Optional[float] = None , _UpperCAmelCase: Optional[bool] = None , _UpperCAmelCase: Optional[int] = None , _UpperCAmelCase: Optional[Union[str, TensorType]] = None , _UpperCAmelCase: Union[str, ChannelDimension] = ChannelDimension.FIRST , **_UpperCAmelCase: List[str] , ):
_lowerCAmelCase :Dict = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase :Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase :List[str] = do_pad if do_pad is not None else self.do_pad
_lowerCAmelCase :Dict = pad_size if pad_size is not None else self.pad_size
_lowerCAmelCase :Dict = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
_lowerCAmelCase :List[str] = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_rescale:
_lowerCAmelCase :int = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_pad:
_lowerCAmelCase :Tuple = [self.pad(_UpperCAmelCase , size=_UpperCAmelCase ) for image in images]
_lowerCAmelCase :List[str] = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
_lowerCAmelCase :int = {'pixel_values': images}
        return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
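# --- Editor's sketch (not part of the original processor) ---
# The processor above pads so height and width land on a multiple of
# `pad_size` (it always adds at least one block, mirroring the arithmetic in
# the `pad` method above). The same computation in plain NumPy:
import numpy as np


def pad_to_multiple(image: np.ndarray, size: int = 8) -> np.ndarray:
    old_height, old_width = image.shape[:2]
    pad_height = (old_height // size + 1) * size - old_height
    pad_width = (old_width // size + 1) * size - old_width
    return np.pad(image, ((0, pad_height), (0, pad_width), (0, 0)), mode="symmetric")


assert pad_to_multiple(np.zeros((18, 21, 3))).shape == (24, 24, 3)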
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """simple docstring"""
    lista = list(string1)
    listb = list(string2)
    count = 0
    for i in range(len(lista)):
        if lista[i] != listb[i]:
            count += 1
            lista[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(lista)


def check(binary: list[str]) -> list[str]:
    """simple docstring"""
    pi = []
    while True:
        checka = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    checka[i] = "*"
                    checka[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if checka[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """simple docstring"""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """simple docstring"""
    lista = list(string1)
    listb = list(string2)
    count_n = 0
    for i in range(len(lista)):
        if lista[i] != listb[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """simple docstring"""
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """simple docstring"""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    """simple docstring"""
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
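# --- Editor's usage sketch (non-interactive, not part of the original file) ---
# The helpers above in isolation, using integer minterms for readability:
assert decimal_to_binary(3, [1, 5]) == ['001', '101']
assert compare_string('001', '101') == '_01'   # strings differ in exactly one bit
assert compare_string('000', '011') is False   # more than one differing bit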
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """simple docstring"""
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    """simple docstring"""

    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    """simple docstring"""
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    """simple docstring"""
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
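# --- Editor's usage sketch (not part of the original file; importing the
# module above needs flax and jax, but `_match` itself only needs `re`) ---
# `_match` slides a tuple of regexes over a flattened parameter key:
assert _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))
assert _match((r"ln_\d+", "bias"), ("transformer", "h", "0", "ln_1", "bias"))
assert not _match(("attention", "out_proj", "kernel"), ("transformer", "ln_f", "scale"))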
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
a = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
a = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
a = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ (datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute( self , predictions , references , max_order=4 , smooth=False ):
        score = compute_bleu(
            reference_corpus=references , translation_corpus=predictions , max_order=max_order , smooth=smooth )
        (bleu , precisions , bp , ratio , translation_length , reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
        }
from collections.abc import Sequence
from queue import Queue


class SegmentTreeNode:
    """simple docstring"""

    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    """simple docstring"""

    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 | 1 |
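# A minimal sketch of the lazy-import mechanism the module above relies on.
# All names here (LazySketch, the demo structure) are hypothetical; the real
# _LazyModule in transformers carries more machinery (dir(), pickling, module
# specs). Attribute access triggers the submodule import only on first use.
import importlib
import types


class LazySketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # reverse map: exported symbol -> submodule that defines it
        self._origin = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr not in self._origin:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._origin[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value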
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
a = datasets.utils.logging.get_logger(__name__)
class UpperCAmelCase_ (folder_based_builder.FolderBasedBuilderConfig ):
"""simple docstring"""
lowerCamelCase : bool = None
lowerCamelCase : bool = None
class UpperCAmelCase_ (folder_based_builder.FolderBasedBuilder ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = datasets.Audio()
lowerCamelCase : str = 'audio'
lowerCamelCase : str = AudioFolderConfig
lowerCamelCase : List[str] # definition at the bottom of the script
lowerCamelCase : Tuple = AudioClassification(audio_column='audio' , label_column='label' )
a = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
a = AUDIO_EXTENSIONS | 687 |
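# Usage sketch for the audio folder builder above. "audiofolder" is the
# packaged loader name in datasets; the directory path below is hypothetical.
# Labels are inferred from sub-directory names (e.g. folder/dog/1.wav gets
# label "dog") unless drop_labels is set.
from datasets import load_dataset

dataset = load_dataset("audiofolder", data_dir="path/to/audio/folder")
print(dataset["train"][0])  # {"audio": {...}, "label": ...}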
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self: str , _UpperCAmelCase: str , _UpperCAmelCase: Optional[int]=7 , _UpperCAmelCase: Union[str, Any]=3 , _UpperCAmelCase: int=18 , _UpperCAmelCase: List[Any]=30 , _UpperCAmelCase: List[Any]=400 , _UpperCAmelCase: Optional[Any]=True , _UpperCAmelCase: Any=None , _UpperCAmelCase: Any=True , _UpperCAmelCase: int=None , _UpperCAmelCase: Union[str, Any]=True , ):
_lowerCAmelCase :Tuple = size if size is not None else {'shortest_edge': 20}
_lowerCAmelCase :str = crop_size if crop_size is not None else {'height': 18, 'width': 18}
_lowerCAmelCase :str = parent
_lowerCAmelCase :List[Any] = batch_size
_lowerCAmelCase :Optional[Any] = num_channels
_lowerCAmelCase :Optional[Any] = image_size
_lowerCAmelCase :int = min_resolution
_lowerCAmelCase :List[str] = max_resolution
_lowerCAmelCase :List[str] = do_resize
_lowerCAmelCase :Optional[int] = size
_lowerCAmelCase :str = do_center_crop
_lowerCAmelCase :int = crop_size
_lowerCAmelCase :Optional[int] = do_flip_channel_order
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Any = MobileViTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = MobileViTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self: str ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'center_crop' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_flip_channel_order' ) )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
_lowerCAmelCase :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self: int ):
# Initialize image_processing
_lowerCAmelCase :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
_lowerCAmelCase :Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :str = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
# Initialize image_processing
_lowerCAmelCase :int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
_lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :List[str] = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
# Initialize image_processing
_lowerCAmelCase :Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
_lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :int = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , ) | 687 | 1 |
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :list[list[int]] = [[0 for _ in range(__magic_name__ )] for _ in range(m + 1 )]
for i in range(m + 1 ):
_lowerCAmelCase :str = 1
for n in range(m + 1 ):
for k in range(1 , __magic_name__ ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
a = int(input("""Enter a number: """).strip())
print(partition(n))
except ValueError:
print("""Please enter a number.""")
else:
try:
a = int(sys.argv[1])
print(partition(n))
except ValueError:
print("""Please pass a number.""") | 687 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ (datasets.BuilderConfig ):
"""simple docstring"""
lowerCamelCase : Optional[datasets.Features] = None
class UpperCAmelCase_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCamelCase : Any = PandasConfig
def SCREAMING_SNAKE_CASE__ ( self: int ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: List[str] ):
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_lowerCAmelCase :Dict = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_UpperCAmelCase , (str, list, tuple) ):
_lowerCAmelCase :Any = data_files
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase :List[Any] = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
_lowerCAmelCase :Any = []
for split_name, files in data_files.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :str = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase :Union[str, Any] = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_UpperCAmelCase , gen_kwargs={'files': files} ) )
return splits
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: pa.Table ):
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase :str = table_cast(_UpperCAmelCase , self.config.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: Dict ):
for i, file in enumerate(itertools.chain.from_iterable(_UpperCAmelCase ) ):
with open(_UpperCAmelCase , 'rb' ) as f:
_lowerCAmelCase :Optional[Any] = pa.Table.from_pandas(pd.read_pickle(_UpperCAmelCase ) )
yield i, self._cast_table(_UpperCAmelCase ) | 687 | 1 |
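# Usage sketch for the pandas builder above. "pandas" is the packaged loader
# name in datasets; df.pkl is a hypothetical file written with
# DataFrame.to_pickle, matching the pd.read_pickle call in the generator.
from datasets import load_dataset

dataset = load_dataset("pandas", data_files={"train": "df.pkl"})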
from collections import defaultdict
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: List[str] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: List[str] ):
_lowerCAmelCase :str = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
_lowerCAmelCase :Optional[Any] = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(_UpperCAmelCase ) )
]
_lowerCAmelCase :Union[str, Any] = defaultdict(_UpperCAmelCase ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
_lowerCAmelCase :List[str] = (1 << len(_UpperCAmelCase )) - 1
def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: str , _UpperCAmelCase: Dict ):
# if mask == self.final_mask, every person has been assigned a task; return 1
if mask == self.final_mask:
return 1
# if tasks run out before every person has been assigned one, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't include this task in the arrangement
_lowerCAmelCase :Dict = self.count_ways_until(_UpperCAmelCase , task_no + 1 )
# now assign this task to each eligible person in turn and recursively
# count arrangements for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
_lowerCAmelCase :Optional[Any] = total_ways_util
return self.dp[mask][task_no]
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Union[str, Any] ):
# Store the list of persons for each task
for i in range(len(_UpperCAmelCase ) ):
for j in task_performed[i]:
self.task[j].append(_UpperCAmelCase )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
a = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
a = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
) | 687 |
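# A brute-force cross-check of the bitmask DP above: let every person pick
# one task from their list and keep only the choices where all tasks are
# distinct. For the sample input this should agree with the DP and give 10.
from itertools import product


def count_ways_bruteforce(task_performed):
    return sum(
        1
        for choice in product(*task_performed)
        if len(set(choice)) == len(choice)
    )


assert count_ways_bruteforce([[1, 3, 4], [1, 2, 5], [3, 4]]) == 10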
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
a = """"""
a = """"""
a = """"""
a = 1 # (0 is vertical, 1 is horizontal)
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase :Union[str, Any] = get_dataset(__magic_name__ , __magic_name__ )
print('Processing...' )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = update_image_and_anno(__magic_name__ , __magic_name__ , __magic_name__ )
for index, image in enumerate(__magic_name__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowerCAmelCase :Optional[Any] = random_chars(32 )
_lowerCAmelCase :str = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
_lowerCAmelCase :Tuple = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""/{file_root}.jpg""" , __magic_name__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Success {index+1}/{len(__magic_name__ )} with {file_name}""" )
_lowerCAmelCase :str = []
for anno in new_annos[index]:
_lowerCAmelCase :List[str] = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(__magic_name__ )
with open(f"""/{file_root}.txt""" , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :int = []
_lowerCAmelCase :Union[str, Any] = []
for label_file in glob.glob(os.path.join(__magic_name__ , '*.txt' ) ):
_lowerCAmelCase :Optional[int] = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(__magic_name__ ) as in_file:
_lowerCAmelCase :Union[str, Any] = in_file.readlines()
_lowerCAmelCase :List[Any] = os.path.join(__magic_name__ , f"""{label_name}.jpg""" )
_lowerCAmelCase :Tuple = []
for obj_list in obj_lists:
_lowerCAmelCase :Union[str, Any] = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__magic_name__ )
labels.append(__magic_name__ )
return img_paths, labels
def UpperCamelCase_( __magic_name__ : list , __magic_name__ : list , __magic_name__ : int = 1 ):
"""simple docstring"""
_lowerCAmelCase :str = []
_lowerCAmelCase :Any = []
_lowerCAmelCase :Optional[Any] = []
for idx in range(len(__magic_name__ ) ):
_lowerCAmelCase :Optional[int] = []
_lowerCAmelCase :Optional[Any] = img_list[idx]
path_list.append(__magic_name__ )
_lowerCAmelCase :List[str] = anno_list[idx]
_lowerCAmelCase :Optional[Any] = cva.imread(__magic_name__ )
if flip_type == 1:
_lowerCAmelCase :List[Any] = cva.flip(__magic_name__ , __magic_name__ )
for bbox in img_annos:
_lowerCAmelCase :List[Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
_lowerCAmelCase :List[str] = cva.flip(__magic_name__ , __magic_name__ )
for bbox in img_annos:
_lowerCAmelCase :List[str] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__magic_name__ )
new_imgs_list.append(__magic_name__ )
return new_imgs_list, new_annos_lists, path_list
def UpperCamelCase_( __magic_name__ : int = 32 ):
"""simple docstring"""
assert number_char > 1, "The number of characters should be greater than 1"
_lowerCAmelCase :str = ascii_lowercase + digits
return "".join(random.choice(__magic_name__ ) for _ in range(__magic_name__ ) )
if __name__ == "__main__":
main()
print("""DONE ✅""") | 687 | 1 |
def UpperCamelCase_( __magic_name__ : int = 100 ):
"""simple docstring"""
_lowerCAmelCase :Optional[int] = n * (n + 1) * (2 * n + 1) / 6
_lowerCAmelCase :int = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'''{solution() = }''') | 687 |
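# Direct-summation cross-check of the closed forms above
# (sum of squares = n(n+1)(2n+1)/6, square of sum = (n(n+1)/2)^2):
def solution_bruteforce(n=100):
    return sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))


assert solution_bruteforce(10) == 2640
assert solution_bruteforce(100) == 25164150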
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
a = logging.get_logger(__name__)
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = nn.functional.normalize(__magic_name__ )
_lowerCAmelCase :List[str] = nn.functional.normalize(__magic_name__ )
return torch.mm(__magic_name__ , normalized_text_embeds.t() )
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : str = CLIPConfig
lowerCamelCase : Any = ['CLIPEncoderLayer']
def __init__( self: Optional[int] , _UpperCAmelCase: CLIPConfig ):
super().__init__(_UpperCAmelCase )
_lowerCAmelCase :Any = CLIPVisionModel(config.vision_config )
_lowerCAmelCase :Optional[int] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_UpperCAmelCase )
_lowerCAmelCase :int = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :Any = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :str = nn.Parameter(torch.ones(17 ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = nn.Parameter(torch.ones(3 ) , requires_grad=_UpperCAmelCase )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Dict ):
_lowerCAmelCase :str = self.vision_model(_UpperCAmelCase )[1] # pooled_output
_lowerCAmelCase :Union[str, Any] = self.visual_projection(_UpperCAmelCase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowerCAmelCase :Optional[int] = cosine_distance(_UpperCAmelCase , self.special_care_embeds ).cpu().float().numpy()
_lowerCAmelCase :List[str] = cosine_distance(_UpperCAmelCase , self.concept_embeds ).cpu().float().numpy()
_lowerCAmelCase :str = []
_lowerCAmelCase :List[Any] = image_embeds.shape[0]
for i in range(_UpperCAmelCase ):
_lowerCAmelCase :Optional[Any] = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
_lowerCAmelCase :List[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
_lowerCAmelCase :List[Any] = special_cos_dist[i][concept_idx]
_lowerCAmelCase :Dict = self.special_care_embeds_weights[concept_idx].item()
_lowerCAmelCase :List[Any] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]} )
_lowerCAmelCase :Any = 0.0_1
for concept_idx in range(len(cos_dist[0] ) ):
_lowerCAmelCase :Union[str, Any] = cos_dist[i][concept_idx]
_lowerCAmelCase :str = self.concept_embeds_weights[concept_idx].item()
_lowerCAmelCase :str = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(_UpperCAmelCase )
result.append(_UpperCAmelCase )
_lowerCAmelCase :Any = [len(res['bad_concepts'] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self: str , _UpperCAmelCase: torch.FloatTensor , _UpperCAmelCase: torch.FloatTensor ):
_lowerCAmelCase :Optional[int] = self.vision_model(_UpperCAmelCase )[1] # pooled_output
_lowerCAmelCase :Union[str, Any] = self.visual_projection(_UpperCAmelCase )
_lowerCAmelCase :Dict = cosine_distance(_UpperCAmelCase , self.special_care_embeds )
_lowerCAmelCase :List[str] = cosine_distance(_UpperCAmelCase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
_lowerCAmelCase :Any = 0.0
_lowerCAmelCase :Union[str, Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
_lowerCAmelCase :Tuple = torch.any(special_scores > 0 , dim=1 )
_lowerCAmelCase :List[str] = special_care * 0.0_1
_lowerCAmelCase :Any = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
_lowerCAmelCase :Optional[Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
_lowerCAmelCase :List[str] = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts | 687 | 1 |
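# What the cosine_distance helper above computes: both matrices are
# L2-normalised, so each entry of the result is the cosine similarity
# between an image embedding and a concept embedding (1.0 for identical
# directions). A tiny standalone illustration:
import torch
import torch.nn as nn


def cosine_similarity_demo(a, b):
    return torch.mm(nn.functional.normalize(a), nn.functional.normalize(b).t())


v = torch.tensor([[3.0, 4.0]])
w = torch.tensor([[-4.0, 3.0]])  # perpendicular to v
assert torch.allclose(cosine_similarity_demo(v, v), torch.tensor([[1.0]]))
assert torch.allclose(cosine_similarity_demo(v, w), torch.tensor([[0.0]]), atol=1e-6)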
def UpperCamelCase_( __magic_name__ : int = 10 , __magic_name__ : int = 22 ):
"""simple docstring"""
_lowerCAmelCase :Tuple = range(1 , __magic_name__ )
_lowerCAmelCase :List[str] = range(1 , __magic_name__ )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F'''{solution(10, 22) = }''') | 687 |
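# Why the default bounds above (bases below 10, powers below 22) suffice:
# base**power has exactly `power` digits iff
# 10**(power - 1) <= base**power < 10**power. The upper bound forces
# base <= 9, and for base 9 the lower bound fails once power > 21 (9**22
# still has only 21 digits). Counting within those bounds gives the known
# total of 49 n-digit n-th powers.
assert len(str(9**21)) == 21 and len(str(9**22)) == 21
assert sum(
    1 for power in range(1, 22) for base in range(1, 10)
    if len(str(base**power)) == power
) == 49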
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
a = 6_3_7_8_1_3_7.0
a = 6_3_5_6_7_5_2.3_1_4_2_4_5
a = 6_378_137
def UpperCamelCase_( __magic_name__ : float , __magic_name__ : float , __magic_name__ : float , __magic_name__ : float ):
"""simple docstring"""
_lowerCAmelCase :List[Any] = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
_lowerCAmelCase :Union[str, Any] = atan((1 - flattening) * tan(radians(__magic_name__ ) ) )
_lowerCAmelCase :List[str] = atan((1 - flattening) * tan(radians(__magic_name__ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
_lowerCAmelCase :int = haversine_distance(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
_lowerCAmelCase :str = (b_lata + b_latb) / 2
_lowerCAmelCase :Tuple = (b_latb - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
_lowerCAmelCase :str = (sin(__magic_name__ ) ** 2) * (cos(__magic_name__ ) ** 2)
_lowerCAmelCase :Optional[int] = cos(sigma / 2 ) ** 2
_lowerCAmelCase :List[Any] = (sigma - sin(__magic_name__ )) * (x_numerator / x_denominator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
_lowerCAmelCase :Dict = (cos(__magic_name__ ) ** 2) * (sin(__magic_name__ ) ** 2)
_lowerCAmelCase :str = sin(sigma / 2 ) ** 2
_lowerCAmelCase :Union[str, Any] = (sigma + sin(__magic_name__ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod() | 687 | 1 |
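# Quick consistency check on the constants above: with the WGS-84 semi-axes
# AXIS_A and AXIS_B, the flattening f = (AXIS_A - AXIS_B) / AXIS_A should
# come out near the standard value 1/298.257.
f = (6_378_137.0 - 6_356_752.314_245) / 6_378_137.0
assert abs(1 / f - 298.257) < 0.01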
import doctest
from collections import deque
import numpy as np
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: Union[str, Any] ):
_lowerCAmelCase :List[Any] = [2, 1, 2, -1]
_lowerCAmelCase :List[Any] = [1, 2, 3, 4]
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Optional[int] = len(self.first_signal )
_lowerCAmelCase :int = len(self.second_signal )
_lowerCAmelCase :Dict = max(_UpperCAmelCase , _UpperCAmelCase )
# create a zero matrix of max_length x max_length
_lowerCAmelCase :Union[str, Any] = [[0] * max_length for i in range(_UpperCAmelCase )]
# pads the smaller signal with zeros so both signals have the same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(_UpperCAmelCase ):
_lowerCAmelCase :Dict = deque(self.second_signal )
rotated_signal.rotate(_UpperCAmelCase )
for j, item in enumerate(_UpperCAmelCase ):
matrix[i][j] += item
# multiply the matrix with the first signal
_lowerCAmelCase :List[Any] = np.matmul(np.transpose(_UpperCAmelCase ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(_UpperCAmelCase , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod() | 687 |
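# An FFT-based cross-check of the matrix method above: circular convolution
# equals the inverse DFT of the pointwise product of DFTs. For the two
# signals stored in the class this should give the same result,
# [10.0, 10.0, 6.0, 14.0].
import numpy as np

x = np.array([2, 1, 2, -1], dtype=float)
h = np.array([1, 2, 3, 4], dtype=float)
y = np.fft.ifft(np.fft.fft(x) * np.fft.fft(h)).real.round(2)
assert list(y) == [10.0, 10.0, 6.0, 14.0]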
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Dict = 'encoder-decoder'
lowerCamelCase : Optional[Any] = True
def __init__( self: str , **_UpperCAmelCase: int ):
super().__init__(**_UpperCAmelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
_lowerCAmelCase :Optional[Any] = kwargs.pop('encoder' )
_lowerCAmelCase :Dict = encoder_config.pop('model_type' )
_lowerCAmelCase :str = kwargs.pop('decoder' )
_lowerCAmelCase :str = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
_lowerCAmelCase :str = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Tuple = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Any = True
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls: Tuple , _UpperCAmelCase: PretrainedConfig , _UpperCAmelCase: PretrainedConfig , **_UpperCAmelCase: str ):
logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
_lowerCAmelCase :Dict = True
_lowerCAmelCase :List[str] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowerCAmelCase :Optional[int] = self.encoder.to_dict()
_lowerCAmelCase :Union[str, Any] = self.decoder.to_dict()
_lowerCAmelCase :List[str] = self.__class__.model_type
return output | 687 | 1 |
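# Usage sketch for the composite config above. The obfuscated classmethod
# corresponds to EncoderDecoderConfig.from_encoder_decoder_configs in
# transformers; pairing two default BERT configs is just an example choice.
from transformers import BertConfig, EncoderDecoderConfig

config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
assert config.is_encoder_decoder
assert config.decoder.is_decoder and config.decoder.add_cross_attention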
from __future__ import annotations
def UpperCamelCase_( __magic_name__ : list[int] , __magic_name__ : list[int] , __magic_name__ : list[int] , __magic_name__ : list[list[str]] , __magic_name__ : int , ):
"""simple docstring"""
_lowerCAmelCase :str = len(__magic_name__ )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] )
return
# We iterate over each column in the row to find all valid placements
for col in range(__magic_name__ ):
# First we check that the current board (possible_board) does not already
# contain this column value; if it does, two queens collide vertically.
# Then we apply the two diagonal formulas:
#
# 45º: y - x = b, i.e. row - col = b
# 135º: y + x = b, i.e. row + col = b.
#
# And we verify that the results of these two formulas do not already
# appear in diagonal_right_collisions and diagonal_left_collisions,
# respectively.
#
# If any of these membership tests is true, there is a collision, so we
# continue to the next column.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , __magic_name__ , __magic_name__ , )
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :list[list[str]] = []
depth_first_search([] , [] , [] , __magic_name__ , __magic_name__ )
# Print all the boards
for board in boards:
for column in board:
print(__magic_name__ )
print('' )
print(len(__magic_name__ ) , 'solutions were found.' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4) | 687 |
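# Sanity check for the solver above (using the function names preserved at
# the call sites): the 4x4 board has exactly two solutions -- the column
# patterns [1, 3, 0, 2] and [2, 0, 3, 1], mirror images of each other --
# and the classic 8x8 board has 92.
boards = []
depth_first_search([], [], [], boards, 4)
assert len(boards) == 2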
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: int , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=13 , _UpperCAmelCase: Optional[Any]=32 , _UpperCAmelCase: List[Any]=2 , _UpperCAmelCase: Optional[int]=3 , _UpperCAmelCase: Optional[int]=16 , _UpperCAmelCase: Optional[Any]=[32, 64, 128] , _UpperCAmelCase: Optional[int]=[1, 2, 1] , _UpperCAmelCase: int=[2, 2, 4] , _UpperCAmelCase: List[str]=2 , _UpperCAmelCase: Dict=2.0 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: str=0.0 , _UpperCAmelCase: int=0.0 , _UpperCAmelCase: str=0.1 , _UpperCAmelCase: Dict="gelu" , _UpperCAmelCase: Optional[Any]=False , _UpperCAmelCase: Union[str, Any]=True , _UpperCAmelCase: Union[str, Any]=0.0_2 , _UpperCAmelCase: Optional[int]=1e-5 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: Optional[Any]=None , _UpperCAmelCase: Tuple=True , _UpperCAmelCase: str=10 , _UpperCAmelCase: int=8 , _UpperCAmelCase: List[Any]=["stage1", "stage2"] , _UpperCAmelCase: List[Any]=[1, 2] , ):
_lowerCAmelCase :Optional[int] = parent
_lowerCAmelCase :Dict = batch_size
_lowerCAmelCase :Optional[Any] = image_size
_lowerCAmelCase :Optional[Any] = patch_size
_lowerCAmelCase :List[Any] = num_channels
_lowerCAmelCase :Optional[int] = embed_dim
_lowerCAmelCase :List[str] = hidden_sizes
_lowerCAmelCase :Union[str, Any] = depths
_lowerCAmelCase :int = num_heads
_lowerCAmelCase :Any = window_size
_lowerCAmelCase :List[Any] = mlp_ratio
_lowerCAmelCase :Optional[int] = qkv_bias
_lowerCAmelCase :Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase :Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase :Dict = drop_path_rate
_lowerCAmelCase :List[Any] = hidden_act
_lowerCAmelCase :Tuple = use_absolute_embeddings
_lowerCAmelCase :Optional[int] = patch_norm
_lowerCAmelCase :Optional[Any] = layer_norm_eps
_lowerCAmelCase :Union[str, Any] = initializer_range
_lowerCAmelCase :List[str] = is_training
_lowerCAmelCase :str = scope
_lowerCAmelCase :Optional[int] = use_labels
_lowerCAmelCase :List[Any] = type_sequence_label_size
_lowerCAmelCase :Union[str, Any] = encoder_stride
_lowerCAmelCase :Optional[int] = out_features
_lowerCAmelCase :List[str] = out_indices
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase :Dict = None
if self.use_labels:
_lowerCAmelCase :List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase :str = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self: int ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple ):
_lowerCAmelCase :List[Any] = FocalNetModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCAmelCase :List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Union[str, Any] = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCAmelCase :Optional[int] = None
_lowerCAmelCase :Dict = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Any = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Any = FocalNetForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCAmelCase :List[Any] = 1
_lowerCAmelCase :List[Any] = FocalNetForMaskedImageModeling(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :int = model(_UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int , _UpperCAmelCase: Dict , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = self.type_sequence_label_size
_lowerCAmelCase :Dict = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase :Optional[int] = 1
_lowerCAmelCase :Tuple = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = config_and_inputs
_lowerCAmelCase :List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ (snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase : Optional[Any] = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Tuple = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Any = False
lowerCamelCase : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = FocalNetModelTester(self )
_lowerCAmelCase :str = ConfigTester(self , config_class=_UpperCAmelCase , embed_dim=37 , has_text_modality=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
return
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def SCREAMING_SNAKE_CASE__ ( self: str ):
pass
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[Any] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase :Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase , _lowerCAmelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Tuple = model_class(_UpperCAmelCase )
_lowerCAmelCase :Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase :int = [*signature.parameters.keys()]
_lowerCAmelCase :List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: int , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase :Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
_lowerCAmelCase :List[Any] = outputs.hidden_states
_lowerCAmelCase :str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# FocalNet has a different seq_length
_lowerCAmelCase :Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_lowerCAmelCase :List[str] = outputs.reshaped_hidden_states
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :int = reshaped_hidden_states[0].shape
_lowerCAmelCase :Optional[int] = (
reshaped_hidden_states[0].view(_UpperCAmelCase , _UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[int] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Dict = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase , _lowerCAmelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :str = 3
_lowerCAmelCase :Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCAmelCase :int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCAmelCase :Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :List[str] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Union[str, Any] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
@slow
def SCREAMING_SNAKE_CASE__ ( self: int ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase :List[Any] = FocalNetModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :Optional[int] = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
_lowerCAmelCase :str = model_class(config=_UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Tuple = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.default_image_processor
_lowerCAmelCase :Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCAmelCase :Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase :Dict = model(**_UpperCAmelCase )
# verify the logits
_lowerCAmelCase :str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
_lowerCAmelCase :Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : int = (FocalNetBackbone,) if is_torch_available() else ()
lowerCamelCase : str = FocalNetConfig
lowerCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Any = FocalNetModelTester(self ) | 687 | 1 |
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : int ):
"""simple docstring"""
return [sentence[i : i + ngram_size] for i in range(len(__magic_name__ ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod() | 687 |
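# The obfuscation above collapsed both parameter names of the n-gram helper;
# a restored, runnable version of the same sliding window, with the name
# create_ngram assumed for illustration:
def create_ngram(sentence, ngram_size):
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


assert create_ngram("abcde", 3) == ["abc", "bcd", "cde"]
assert create_ngram("I am", 2) == ["I ", " a", "am"]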
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
a = HfApi()
a = {}
# fmt: off
a = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
a = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
a = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
a = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
a = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
a = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
a = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
a = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
a = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
a = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
a = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
a = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
a = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
a = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
a = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
a = api.list_models(filter="""diffusers""")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
a = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1]
print(F'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith("""CompVis"""):
a = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""")
else:
a = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
a = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
a = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
a = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3
)
print(F'''{mod.modelId} has passed successfully!!!''') | 687 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Tuple = 'roberta'
def __init__( self: List[str] , _UpperCAmelCase: List[Any]=5_0265 , _UpperCAmelCase: Union[str, Any]=768 , _UpperCAmelCase: Any=12 , _UpperCAmelCase: Any=12 , _UpperCAmelCase: Optional[int]=3072 , _UpperCAmelCase: Union[str, Any]="gelu" , _UpperCAmelCase: Union[str, Any]=0.1 , _UpperCAmelCase: Tuple=0.1 , _UpperCAmelCase: List[str]=512 , _UpperCAmelCase: Any=2 , _UpperCAmelCase: List[str]=0.0_2 , _UpperCAmelCase: Tuple=1e-1_2 , _UpperCAmelCase: Union[str, Any]=1 , _UpperCAmelCase: Tuple=0 , _UpperCAmelCase: Optional[Any]=2 , _UpperCAmelCase: str="absolute" , _UpperCAmelCase: Union[str, Any]=True , _UpperCAmelCase: str=None , **_UpperCAmelCase: int , ):
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Any = vocab_size
_lowerCAmelCase :Tuple = hidden_size
_lowerCAmelCase :int = num_hidden_layers
_lowerCAmelCase :str = num_attention_heads
_lowerCAmelCase :int = hidden_act
_lowerCAmelCase :List[Any] = intermediate_size
_lowerCAmelCase :Dict = hidden_dropout_prob
_lowerCAmelCase :Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase :Optional[Any] = max_position_embeddings
_lowerCAmelCase :Tuple = type_vocab_size
_lowerCAmelCase :List[Any] = initializer_range
_lowerCAmelCase :Any = layer_norm_eps
_lowerCAmelCase :Tuple = position_embedding_type
_lowerCAmelCase :Optional[int] = use_cache
_lowerCAmelCase :Dict = classifier_dropout
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
if self.task == "multiple-choice":
_lowerCAmelCase :Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase :List[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] ) | 687 |
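# Quick look at what the ONNX inputs property above returns for the default
# task (RobertaOnnxConfig is the public name of the obfuscated class):
from transformers import RobertaConfig
from transformers.models.roberta.configuration_roberta import RobertaOnnxConfig

onnx_config = RobertaOnnxConfig(RobertaConfig())
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])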
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Optional[int] = 10
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :str = [1, 2, 3, 4]
_lowerCAmelCase :Union[str, Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
_lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
_lowerCAmelCase :Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :List[str] = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
_lowerCAmelCase , _lowerCAmelCase :Optional[Any] = process_story(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [] )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Optional[int] = ''
_lowerCAmelCase , _lowerCAmelCase :str = process_story(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [] )
self.assertEqual(_UpperCAmelCase , [] )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Optional[Any] = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
_lowerCAmelCase , _lowerCAmelCase :Optional[int] = process_story(_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Optional[int] = ['It was the best of times.']
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
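        # With pad token 0 and no padding present, the mask should be all ones.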
_lowerCAmelCase :Union[str, Any] = torch.tensor([1, 2, 3, 4] )
_lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 0 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :List[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
_lowerCAmelCase :Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 23 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 1 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :List[str] = 101
_lowerCAmelCase :Dict = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
_lowerCAmelCase :int = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_lowerCAmelCase :List[str] = compute_token_type_ids(_UpperCAmelCase , _UpperCAmelCase )
np.testing.assert_array_equal(_UpperCAmelCase , _UpperCAmelCase ) | 687 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
a = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCamelCase : bool = field(
default=snake_case__ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
lowerCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCamelCase : bool = field(
default=snake_case__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(default=snake_case__ , metadata={'help': 'The input training data file (a text file).'} )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
lowerCamelCase : bool = field(
default=snake_case__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'The maximum total input sequence length after tokenization. If passed, sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCamelCase : bool = field(
default=snake_case__ , metadata={
'help': (
'Whether to pad all samples to the maximum sentence length. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
'efficient on GPU but very bad for TPU.'
)
} , )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
if self.train_file is not None:
_lowerCAmelCase :Dict = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
_lowerCAmelCase :Dict = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : PreTrainedTokenizerBase
lowerCamelCase : Union[bool, str, PaddingStrategy] = True
lowerCamelCase : Optional[int] = None
lowerCamelCase : Optional[int] = None
def __call__( self: Tuple , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :str = 'label' if 'label' in features[0].keys() else 'labels'
_lowerCAmelCase :List[Any] = [feature.pop(_UpperCAmelCase ) for feature in features]
_lowerCAmelCase :int = len(_UpperCAmelCase )
_lowerCAmelCase :str = len(features[0]['input_ids'] )
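        # Flatten (batch_size, num_choices, seq_len) features into batch_size * num_choices rows
        # so the tokenizer can pad them all in one call; the view() below restores the choice axis.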
_lowerCAmelCase :Dict = [
[{k: v[i] for k, v in feature.items()} for i in range(_UpperCAmelCase )] for feature in features
]
_lowerCAmelCase :Optional[int] = list(chain(*_UpperCAmelCase ) )
_lowerCAmelCase :Tuple = self.tokenizer.pad(
_UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
# Un-flatten
_lowerCAmelCase :Tuple = {k: v.view(_UpperCAmelCase , _UpperCAmelCase , -1 ) for k, v in batch.items()}
# Add back labels
        _lowerCAmelCase :Optional[int] = torch.tensor(_UpperCAmelCase , dtype=torch.int64 )
return batch
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , __magic_name__ , __magic_name__ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_lowerCAmelCase :List[Any] = training_args.get_process_log_level()
logger.setLevel(__magic_name__ )
datasets.utils.logging.set_verbosity(__magic_name__ )
transformers.utils.logging.set_verbosity(__magic_name__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_lowerCAmelCase :str = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowerCAmelCase :Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_lowerCAmelCase :Any = {}
if data_args.train_file is not None:
_lowerCAmelCase :Any = data_args.train_file
if data_args.validation_file is not None:
_lowerCAmelCase :str = data_args.validation_file
_lowerCAmelCase :Optional[int] = data_args.train_file.split('.' )[-1]
_lowerCAmelCase :Dict = load_dataset(
__magic_name__ , data_files=__magic_name__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
_lowerCAmelCase :int = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCAmelCase :Dict = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_lowerCAmelCase :Tuple = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_lowerCAmelCase :int = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_lowerCAmelCase :Union[str, Any] = [f"""ending{i}""" for i in range(4 )]
_lowerCAmelCase :List[str] = 'sent1'
_lowerCAmelCase :Optional[Any] = 'sent2'
if data_args.max_seq_length is None:
_lowerCAmelCase :List[str] = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
_lowerCAmelCase :Tuple = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
_lowerCAmelCase :List[str] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(__magic_name__ : Optional[int] ):
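        # Repeat each context once per candidate ending so every (context, ending) pair is encoded.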
_lowerCAmelCase :Tuple = [[context] * 4 for context in examples[context_name]]
_lowerCAmelCase :List[str] = examples[question_header_name]
_lowerCAmelCase :str = [
[f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__magic_name__ )
]
# Flatten out
_lowerCAmelCase :str = list(chain(*__magic_name__ ) )
_lowerCAmelCase :Optional[Any] = list(chain(*__magic_name__ ) )
# Tokenize
_lowerCAmelCase :Dict = tokenizer(
__magic_name__ , __magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(__magic_name__ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
_lowerCAmelCase :Union[str, Any] = raw_datasets['train']
if data_args.max_train_samples is not None:
_lowerCAmelCase :List[Any] = min(len(__magic_name__ ) , data_args.max_train_samples )
_lowerCAmelCase :Union[str, Any] = train_dataset.select(range(__magic_name__ ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
_lowerCAmelCase :Dict = train_dataset.map(
__magic_name__ , batched=__magic_name__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
_lowerCAmelCase :Tuple = raw_datasets['validation']
if data_args.max_eval_samples is not None:
_lowerCAmelCase :Dict = min(len(__magic_name__ ) , data_args.max_eval_samples )
_lowerCAmelCase :List[Any] = eval_dataset.select(range(__magic_name__ ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
_lowerCAmelCase :Any = eval_dataset.map(
__magic_name__ , batched=__magic_name__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
_lowerCAmelCase :Optional[int] = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=__magic_name__ , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(__magic_name__ : str ):
_lowerCAmelCase , _lowerCAmelCase :List[str] = eval_predictions
_lowerCAmelCase :Optional[int] = np.argmax(__magic_name__ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
_lowerCAmelCase :Any = Trainer(
model=__magic_name__ , args=__magic_name__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__magic_name__ , data_collator=__magic_name__ , compute_metrics=__magic_name__ , )
# Training
if training_args.do_train:
_lowerCAmelCase :int = None
if training_args.resume_from_checkpoint is not None:
_lowerCAmelCase :int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowerCAmelCase :List[Any] = last_checkpoint
_lowerCAmelCase :Union[str, Any] = trainer.train(resume_from_checkpoint=__magic_name__ )
trainer.save_model() # Saves the tokenizer too for easy upload
_lowerCAmelCase :Union[str, Any] = train_result.metrics
_lowerCAmelCase :str = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__magic_name__ )
)
_lowerCAmelCase :int = min(__magic_name__ , len(__magic_name__ ) )
trainer.log_metrics('train' , __magic_name__ )
trainer.save_metrics('train' , __magic_name__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_lowerCAmelCase :str = trainer.evaluate()
_lowerCAmelCase :Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__magic_name__ )
_lowerCAmelCase :str = min(__magic_name__ , len(__magic_name__ ) )
trainer.log_metrics('eval' , __magic_name__ )
trainer.save_metrics('eval' , __magic_name__ )
_lowerCAmelCase :List[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**__magic_name__ )
else:
trainer.create_model_card(**__magic_name__ )
def UpperCamelCase_( __magic_name__ : str ):
"""simple docstring"""
main()
if __name__ == "__main__":
main() | 687 |
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
a = int(input("""Enter number: """).strip())
print(F'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''') | 687 | 1 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self: Union[str, Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: int=13 , _UpperCAmelCase: str=7 , _UpperCAmelCase: Tuple=True , _UpperCAmelCase: int=True , _UpperCAmelCase: List[str]=True , _UpperCAmelCase: Dict=True , _UpperCAmelCase: Union[str, Any]=99 , _UpperCAmelCase: List[str]=32 , _UpperCAmelCase: List[Any]=5 , _UpperCAmelCase: str=4 , _UpperCAmelCase: Optional[int]=37 , _UpperCAmelCase: Union[str, Any]="gelu" , _UpperCAmelCase: int=0.1 , _UpperCAmelCase: Union[str, Any]=0.1 , _UpperCAmelCase: Any=512 , _UpperCAmelCase: Optional[Any]=16 , _UpperCAmelCase: List[str]=2 , _UpperCAmelCase: List[Any]=0.0_2 , _UpperCAmelCase: Optional[int]=4 , ):
_lowerCAmelCase :Dict = parent
_lowerCAmelCase :Union[str, Any] = batch_size
_lowerCAmelCase :Tuple = seq_length
_lowerCAmelCase :str = is_training
_lowerCAmelCase :Dict = use_attention_mask
_lowerCAmelCase :Union[str, Any] = use_token_type_ids
_lowerCAmelCase :Optional[int] = use_labels
_lowerCAmelCase :Union[str, Any] = vocab_size
_lowerCAmelCase :Union[str, Any] = hidden_size
_lowerCAmelCase :Optional[Any] = num_hidden_layers
_lowerCAmelCase :List[str] = num_attention_heads
_lowerCAmelCase :List[str] = intermediate_size
_lowerCAmelCase :Dict = hidden_act
_lowerCAmelCase :Optional[Any] = hidden_dropout_prob
_lowerCAmelCase :Any = attention_probs_dropout_prob
_lowerCAmelCase :str = max_position_embeddings
_lowerCAmelCase :Tuple = type_vocab_size
_lowerCAmelCase :Tuple = type_sequence_label_size
_lowerCAmelCase :Dict = initializer_range
_lowerCAmelCase :str = num_choices
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase :Tuple = None
if self.use_attention_mask:
_lowerCAmelCase :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase :Any = None
if self.use_token_type_ids:
_lowerCAmelCase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase :Tuple = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :Optional[Any] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Tuple = config_and_inputs
_lowerCAmelCase :Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Tuple = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :int = FlaxAlbertModelTester(self )
@slow
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
for model_class_name in self.all_model_classes:
_lowerCAmelCase :Tuple = model_class_name.from_pretrained('albert-base-v2' )
_lowerCAmelCase :Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
@require_flax
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Union[str, Any] = FlaxAlbertModel.from_pretrained('albert-base-v2' )
_lowerCAmelCase :Optional[int] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_lowerCAmelCase :int = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCAmelCase :Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
_lowerCAmelCase :Union[str, Any] = (1, 11, 768)
self.assertEqual(output.shape , _UpperCAmelCase )
_lowerCAmelCase :Tuple = np.array(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) ) | 687 |
from __future__ import annotations
from collections.abc import MutableSequence
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: List[Any] , _UpperCAmelCase: int , _UpperCAmelCase: MutableSequence[float] ):
if len(_UpperCAmelCase ) != degree + 1:
raise ValueError(
'The number of coefficients should be equal to the degree + 1.' )
_lowerCAmelCase :list[float] = list(_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = degree
def __add__( self: str , _UpperCAmelCase: Polynomial ):
if self.degree > polynomial_a.degree:
_lowerCAmelCase :Any = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , _UpperCAmelCase )
else:
_lowerCAmelCase :List[Any] = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , _UpperCAmelCase )
def __sub__( self: str , _UpperCAmelCase: Polynomial ):
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self: Union[str, Any] ):
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self: int , _UpperCAmelCase: Polynomial ):
_lowerCAmelCase :list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: int | float ):
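        # Evaluate by direct summation of coefficient_i * x**i (Horner's rule would use fewer multiplications).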
_lowerCAmelCase :int | float = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self: Union[str, Any] ):
_lowerCAmelCase :Dict = ''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(_UpperCAmelCase )
return polynomial
def __repr__( self: Optional[Any] ):
return self.__str__()
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
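        # d/dx of c * x**(i + 1) is (i + 1) * c * x**i, so new coefficient i is coefficients[i + 1] * (i + 1).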
_lowerCAmelCase :list[float] = [0] * self.degree
for i in range(self.degree ):
_lowerCAmelCase :Tuple = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int | float = 0 ):
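        # Antiderivative: coefficient_i / (i + 1) becomes the coefficient of x**(i + 1); the constant argument is the x**0 term.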
_lowerCAmelCase :list[float] = [0] * (self.degree + 2)
_lowerCAmelCase :str = constant
for i in range(self.degree + 1 ):
_lowerCAmelCase :List[str] = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , _UpperCAmelCase )
def __eq__( self: List[Any] , _UpperCAmelCase: object ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self: Optional[Any] , _UpperCAmelCase: object ):
return not self.__eq__(_UpperCAmelCase ) | 687 | 1 |
from __future__ import annotations
import pandas as pd
def UpperCamelCase_( __magic_name__ : list[int] , __magic_name__ : list[int] , __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :Dict = [0] * no_of_processes
_lowerCAmelCase :Optional[Any] = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(__magic_name__ ):
_lowerCAmelCase :Dict = burst_time[i]
_lowerCAmelCase :List[Any] = 0
_lowerCAmelCase :Tuple = 0
_lowerCAmelCase :Optional[int] = 999999999
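    # 999999999 stands in for infinity while searching for the process with the shortest remaining time.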
_lowerCAmelCase :str = 0
_lowerCAmelCase :List[str] = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(__magic_name__ ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
_lowerCAmelCase :List[Any] = remaining_time[j]
_lowerCAmelCase :str = j
_lowerCAmelCase :Dict = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
_lowerCAmelCase :Optional[int] = remaining_time[short]
if minm == 0:
_lowerCAmelCase :Optional[Any] = 999999999
if remaining_time[short] == 0:
complete += 1
_lowerCAmelCase :List[Any] = False
# Find finish time of current process
_lowerCAmelCase :Dict = increment_time + 1
# Calculate waiting time
_lowerCAmelCase :Optional[int] = finish_time - arrival_time[short]
_lowerCAmelCase :int = finar - burst_time[short]
if waiting_time[short] < 0:
_lowerCAmelCase :Tuple = 0
# Increment time
increment_time += 1
return waiting_time
def UpperCamelCase_( __magic_name__ : list[int] , __magic_name__ : int , __magic_name__ : list[int] ):
"""simple docstring"""
_lowerCAmelCase :List[str] = [0] * no_of_processes
for i in range(__magic_name__ ):
_lowerCAmelCase :Tuple = burst_time[i] + waiting_time[i]
return turn_around_time
def UpperCamelCase_( __magic_name__ : list[int] , __magic_name__ : list[int] , __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :str = 0
_lowerCAmelCase :Tuple = 0
for i in range(__magic_name__ ):
_lowerCAmelCase :Union[str, Any] = total_waiting_time + waiting_time[i]
_lowerCAmelCase :Union[str, Any] = total_turn_around_time + turn_around_time[i]
print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
    print(f"""Average turn around time = {total_turn_around_time / no_of_processes:.5f}""" )
if __name__ == "__main__":
print("""Enter how many process you want to analyze""")
a = int(input())
a = [0] * no_of_processes
a = [0] * no_of_processes
a = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
a , a = map(int, input().split())
a = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
a = burst_time
a = no_of_processes
a = waiting_time
a = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
a = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"""Process""",
"""BurstTime""",
"""ArrivalTime""",
"""WaitingTime""",
"""TurnAroundTime""",
],
)
# Printing the dataFrame
pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
print(fcfs) | 687 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
a = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 | 1 |
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def UpperCamelCase_( __magic_name__ : Union[str, Any] ):
"""simple docstring"""
if hor == 128:
_lowerCAmelCase :Optional[Any] = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
_lowerCAmelCase :int = (32, 128, 256)
_lowerCAmelCase :Union[str, Any] = ('UpResnetBlock1D', 'UpResnetBlock1D')
elif hor == 32:
_lowerCAmelCase :Dict = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
_lowerCAmelCase :Dict = (32, 64, 128, 256)
_lowerCAmelCase :int = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
_lowerCAmelCase :List[str] = torch.load(f"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""" )
_lowerCAmelCase :List[Any] = model.state_dict()
_lowerCAmelCase :List[Any] = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 65536,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
    _lowerCAmelCase :Optional[int] = UNet1DModel(**__magic_name__ )
print(f"""length of state dict: {len(state_dict.keys() )}""" )
print(f"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
_lowerCAmelCase :List[Any] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
_lowerCAmelCase :Union[str, Any] = state_dict.pop(__magic_name__ )
hf_value_function.load_state_dict(__magic_name__ )
torch.save(hf_value_function.state_dict() , f"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""" )
with open(f"""hub/hopper-medium-v2/unet/hor{hor}/config.json""" , 'w' ) as f:
json.dump(__magic_name__ , __magic_name__ )
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :List[Any] = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 128, 256),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 65536,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
_lowerCAmelCase :Any = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
_lowerCAmelCase :Optional[Any] = model
    _lowerCAmelCase :List[str] = UNet1DModel(**__magic_name__ )
print(f"""length of state dict: {len(state_dict.keys() )}""" )
print(f"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
_lowerCAmelCase :Optional[Any] = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
_lowerCAmelCase :Dict = state_dict.pop(__magic_name__ )
hf_value_function.load_state_dict(__magic_name__ )
torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f:
json.dump(__magic_name__ , __magic_name__ )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function() | 687 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : float | Decimal , __magic_name__ : float = 10**-10 ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = a
while True:
_lowerCAmelCase :str = Decimal(__magic_name__ ) - (
Decimal(eval(__magic_name__ ) ) / Decimal(eval(str(diff(__magic_name__ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(__magic_name__ ) ) < precision: # noqa: S307
return float(__magic_name__ )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
    # Find root of logarithmic function (log(x) - 1 = 0 has root e, about 2.71828)
print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
    # Find root of exponential function (exp(x) - 1 = 0 has root 0)
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''') | 687 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
a = None
a = logging.get_logger(__name__)
a = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
a = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
a = {
"""t5-small""": 512,
"""t5-base""": 512,
"""t5-large""": 512,
"""t5-3b""": 512,
"""t5-11b""": 512,
}
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : List[str] = VOCAB_FILES_NAMES
lowerCamelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Union[str, Any] = ['input_ids', 'attention_mask']
    lowerCamelCase : int = T5Tokenizer
lowerCamelCase : List[int] = []
def __init__( self: Optional[int] , _UpperCAmelCase: Tuple=None , _UpperCAmelCase: Any=None , _UpperCAmelCase: int="</s>" , _UpperCAmelCase: Tuple="<unk>" , _UpperCAmelCase: List[Any]="<pad>" , _UpperCAmelCase: Optional[Any]=100 , _UpperCAmelCase: Any=None , **_UpperCAmelCase: Dict , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_lowerCAmelCase :Tuple = [f"""<extra_id_{i}>""" for i in range(_UpperCAmelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
_lowerCAmelCase :Optional[Any] = len(set(filter(lambda _UpperCAmelCase : bool('extra_id_' in str(_UpperCAmelCase ) ) , _UpperCAmelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , extra_ids=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
_lowerCAmelCase :str = vocab_file
_lowerCAmelCase :Optional[Any] = False if not self.vocab_file else True
_lowerCAmelCase :Union[str, Any] = extra_ids
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase: List[Any] , _UpperCAmelCase: List[Any] , _UpperCAmelCase: List[str] ):
        if pretrained_model_name_or_path in UpperCAmelCase_.max_model_input_sizes:
            _lowerCAmelCase :Tuple = UpperCAmelCase_.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , _UpperCAmelCase , )
return max_model_length
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: str , _UpperCAmelCase: Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase :List[Any] = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.vocab_file , _UpperCAmelCase )
logger.info(f"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: List[int] , _UpperCAmelCase: Optional[List[int]] = None ):
_lowerCAmelCase :List[str] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
_lowerCAmelCase :List[Any] = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: List[int] , _UpperCAmelCase: Optional[List[int]] = None ):
_lowerCAmelCase :Tuple = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def SCREAMING_SNAKE_CASE__ ( self: int ):
return list(
set(filter(lambda _UpperCAmelCase : bool(re.search(r'<extra_id_\d+>' , _UpperCAmelCase ) ) is not None , self.additional_special_tokens ) ) )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
return [self.convert_tokens_to_ids(_UpperCAmelCase ) for token in self.get_sentinel_tokens()] | 687 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
a = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""sample_size""": 256,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
a = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
a = {
"""num_train_timesteps""": 151,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
def UpperCamelCase_( __magic_name__ : Dict ):
"""simple docstring"""
if isinstance(__magic_name__ , __magic_name__ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[Any]=False ):
"""simple docstring"""
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.in_layers.0.weight"""]
_lowerCAmelCase :Union[str, Any] = checkpoint[f"""{old_prefix}.in_layers.0.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.in_layers.2.weight"""]
_lowerCAmelCase :Optional[Any] = checkpoint[f"""{old_prefix}.in_layers.2.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""]
_lowerCAmelCase :Any = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.out_layers.0.weight"""]
_lowerCAmelCase :List[Any] = checkpoint[f"""{old_prefix}.out_layers.0.bias"""]
_lowerCAmelCase :Optional[int] = checkpoint[f"""{old_prefix}.out_layers.3.weight"""]
_lowerCAmelCase :Dict = checkpoint[f"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
_lowerCAmelCase :List[Any] = checkpoint[f"""{old_prefix}.skip_connection.weight"""]
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : List[str] , __magic_name__ : List[str]=None ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Tuple = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Any = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.norm.weight"""]
_lowerCAmelCase :Dict = checkpoint[f"""{old_prefix}.norm.bias"""]
_lowerCAmelCase :Dict = weight_q.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :str = bias_q.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :List[str] = weight_k.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :Optional[Any] = bias_k.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :Tuple = weight_v.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :List[Any] = bias_v.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :int = (
checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
_lowerCAmelCase :Optional[Any] = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Optional[Any] ):
"""simple docstring"""
_lowerCAmelCase :Union[str, Any] = torch.load(__magic_name__ , map_location='cpu' )
_lowerCAmelCase :List[Any] = {}
_lowerCAmelCase :List[str] = checkpoint['time_embed.0.weight']
_lowerCAmelCase :Tuple = checkpoint['time_embed.0.bias']
_lowerCAmelCase :Dict = checkpoint['time_embed.2.weight']
_lowerCAmelCase :Union[str, Any] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
_lowerCAmelCase :Union[str, Any] = checkpoint['label_emb.weight']
_lowerCAmelCase :str = checkpoint['input_blocks.0.0.weight']
_lowerCAmelCase :str = checkpoint['input_blocks.0.0.bias']
_lowerCAmelCase :List[Any] = unet_config['down_block_types']
_lowerCAmelCase :Any = unet_config['layers_per_block']
_lowerCAmelCase :List[Any] = unet_config['attention_head_dim']
_lowerCAmelCase :Tuple = unet_config['block_out_channels']
_lowerCAmelCase :List[str] = 1
_lowerCAmelCase :Optional[int] = channels_list[0]
for i, layer_type in enumerate(__magic_name__ ):
_lowerCAmelCase :Tuple = channels_list[i]
_lowerCAmelCase :Optional[Any] = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(__magic_name__ ):
_lowerCAmelCase :int = f"""down_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :List[Any] = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :int = True if j == 0 and downsample_block_has_skip else False
_lowerCAmelCase :List[Any] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(__magic_name__ ):
_lowerCAmelCase :List[str] = f"""down_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :Optional[int] = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :List[str] = True if j == 0 and downsample_block_has_skip else False
_lowerCAmelCase :Optional[int] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
_lowerCAmelCase :Optional[int] = f"""down_blocks.{i}.attentions.{j}"""
_lowerCAmelCase :str = f"""input_blocks.{current_layer}.1"""
_lowerCAmelCase :Optional[Any] = convert_attention(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Union[str, Any] = f"""down_blocks.{i}.downsamplers.0"""
_lowerCAmelCase :Tuple = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :Optional[int] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
_lowerCAmelCase :Dict = current_channels
# hardcoded the mid-block for now
_lowerCAmelCase :int = 'mid_block.resnets.0'
_lowerCAmelCase :Optional[Any] = 'middle_block.0'
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Optional[int] = 'mid_block.attentions.0'
_lowerCAmelCase :Optional[int] = 'middle_block.1'
_lowerCAmelCase :List[Any] = convert_attention(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Union[str, Any] = 'mid_block.resnets.1'
_lowerCAmelCase :Optional[int] = 'middle_block.2'
_lowerCAmelCase :int = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Tuple = 0
_lowerCAmelCase :str = unet_config['up_block_types']
for i, layer_type in enumerate(__magic_name__ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCAmelCase :Optional[Any] = f"""up_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :Dict = f"""output_blocks.{current_layer}.0"""
_lowerCAmelCase :Any = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Any = f"""up_blocks.{i}.upsamplers.0"""
_lowerCAmelCase :Dict = f"""output_blocks.{current_layer-1}.1"""
_lowerCAmelCase :Tuple = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCAmelCase :Tuple = f"""up_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :List[str] = f"""output_blocks.{current_layer}.0"""
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
_lowerCAmelCase :str = f"""up_blocks.{i}.attentions.{j}"""
_lowerCAmelCase :List[Any] = f"""output_blocks.{current_layer}.1"""
_lowerCAmelCase :int = convert_attention(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Optional[int] = f"""up_blocks.{i}.upsamplers.0"""
_lowerCAmelCase :int = f"""output_blocks.{current_layer-1}.2"""
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :str = checkpoint['out.0.weight']
_lowerCAmelCase :Union[str, Any] = checkpoint['out.0.bias']
_lowerCAmelCase :List[Any] = checkpoint['out.2.weight']
_lowerCAmelCase :Dict = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
a = parser.parse_args()
a = strabool(args.class_cond)
a = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
a = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
a = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
a = None
a = con_pt_to_diffuser(args.unet_path, unet_config)
    a = UNet2DModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
a = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
a = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
a = CMStochasticIterativeScheduler(**scheduler_config)
a = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path) | 687 | 1 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
a = logging.getLogger()
a = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Optional[int] ):
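        # Write tiny source/target files so the finetuning script can run end to end inside the test.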
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
_lowerCAmelCase :int = {'source': 'What is love ?', 'target': 'life'}
_lowerCAmelCase :str = {'train': 12, 'val': 2, 'test': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
_lowerCAmelCase :Optional[int] = '\n'.join([contents[field]] * n_lines[split] )
with open(os.path.join(_UpperCAmelCase , f"""{split}.{field}""" ) , 'w' ) as f:
f.write(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: int , _UpperCAmelCase: str = "pytorch" ):
_lowerCAmelCase :Dict = self.get_auto_remove_tmp_dir()
_lowerCAmelCase :int = os.path.join(_UpperCAmelCase , 'output' )
_lowerCAmelCase :Tuple = os.path.join(_UpperCAmelCase , 'data' )
self._create_dummy_data(data_dir=_UpperCAmelCase )
_lowerCAmelCase :Dict = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
_lowerCAmelCase :str = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(_UpperCAmelCase , env=self.get_env() )
_lowerCAmelCase :Optional[int] = os.path.join(_UpperCAmelCase , 'metrics.json' )
with open(_UpperCAmelCase ) as f:
_lowerCAmelCase :List[str] = json.load(_UpperCAmelCase )
return result
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :Tuple = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :Optional[int] = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_gpu
@require_ray
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :str = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
@require_ray
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Dict = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 ) | 687 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
a = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Optional[Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
_lowerCAmelCase :Tuple = self.diffusers_dir
shutil.copy(
os.path.join(_UpperCAmelCase , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :str = 'src/diffusers'
shutil.rmtree(self.diffusers_dir )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=None ):
_lowerCAmelCase :int = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_lowerCAmelCase :Dict = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_lowerCAmelCase :Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_lowerCAmelCase :List[str] = black.format_str(_UpperCAmelCase , mode=_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = os.path.join(self.diffusers_dir , 'new_code.py' )
with open(_UpperCAmelCase , 'w' , newline='\n' ) as f:
f.write(_UpperCAmelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_UpperCAmelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_UpperCAmelCase )
with open(_UpperCAmelCase , 'r' ) as f:
self.assertTrue(f.read() , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :List[str] = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
# Base copy consistency
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , _UpperCAmelCase , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , _UpperCAmelCase ) , )
# Copy consistency with a really long name
_lowerCAmelCase :Optional[int] = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , _UpperCAmelCase , _UpperCAmelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , _UpperCAmelCase , overwrite_result=re.sub('DDPM' , 'Test' , _UpperCAmelCase ) , ) | 687 | 1 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : List[str]
lowerCamelCase : Optional[str] = None
# Automatically constructed
lowerCamelCase : ClassVar[str] = "dict"
lowerCamelCase : ClassVar[Any] = None
lowerCamelCase : str = field(default='Translation' , init=snake_case__ , repr=snake_case__ )
def __call__( self: List[Any] ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def SCREAMING_SNAKE_CASE__ ( self: int ):
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[List] = None
lowerCamelCase : Optional[int] = None
lowerCamelCase : Optional[str] = None
# Automatically constructed
lowerCamelCase : ClassVar[str] = "dict"
lowerCamelCase : ClassVar[Any] = None
lowerCamelCase : str = field(default='TranslationVariableLanguages' , init=snake_case__ , repr=snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :Dict = sorted(set(self.languages ) ) if self.languages else None
_lowerCAmelCase :Tuple = len(self.languages ) if self.languages else None
def __call__( self: Tuple ):
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: Dict ):
_lowerCAmelCase :str = set(self.languages )
if self.languages and set(_UpperCAmelCase ) - lang_set:
raise ValueError(
f"""Some languages in example ({", ".join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(_UpperCAmelCase )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
_lowerCAmelCase :List[str] = []
for lang, text in translation_dict.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
_lowerCAmelCase , _lowerCAmelCase :Union[str, Any] = zip(*sorted(_UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def SCREAMING_SNAKE_CASE__ ( self: str ):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
} | 687 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} )
lowerCamelCase : Optional[str] = field(
default='./' , metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for training.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for evaluation.'} )
lowerCamelCase : Optional[float] = field(default=0.1 , metadata={'help': 'Value of weight decay.'} )
lowerCamelCase : Optional[int] = field(
default=1_00_00 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
lowerCamelCase : Optional[float] = field(default=2e-4 , metadata={'help': 'Learning rate fo training.'} )
lowerCamelCase : Optional[str] = field(default='cosine' , metadata={'help': 'Learning rate.'} )
lowerCamelCase : Optional[int] = field(
default=7_50 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
lowerCamelCase : Optional[int] = field(
default=16 , metadata={'help': 'Number of gradient accumulation steps.'} )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
lowerCamelCase : Optional[int] = field(default=5_00_00 , metadata={'help': 'Maximum number of training steps.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Sequence lengths used for training.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Training seed.'} )
lowerCamelCase : Optional[int] = field(
default=10_24 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} , )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Length of sequences to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for code evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Sample from the language model\'s output distribution.'} )
lowerCamelCase : Optional[float] = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} )
lowerCamelCase : Optional[int] = field(default=2_56 , metadata={'help': 'Maximum number of newly generated tokens.'} )
lowerCamelCase : Optional[int] = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} )
lowerCamelCase : Optional[float] = field(default=0.95 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
lowerCamelCase : Optional[int] = field(default=10 , metadata={'help': 'Number of generations to run in parallel.'} )
lowerCamelCase : Optional[int] = field(
default=2_00 , metadata={'help': 'Number of completions to generate for each sample.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
lowerCamelCase : Optional[str] = field(
default='eval_results.json' , metadata={'help': 'Random seed used for evaluation.'} )
lowerCamelCase : Optional[str] = field(
default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} , )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} , )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot-clean' , metadata={'help': 'Folder to save processed processed dataset.'} )
lowerCamelCase : Optional[int] = field(
default=10_00_00 , metadata={'help': 'Number of files to save per JSON output file.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[float] = field(
default=10_00 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1_00 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.25 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1.5 , metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'If True, near-duplicate samples are removed.'} )
lowerCamelCase : Optional[float] = field(
default=0.85 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[int] = field(default=20_00_00 , metadata={'help': 'Number of examples to train tokenizer on.'} )
lowerCamelCase : Optional[int] = field(
default=3_27_68 , metadata={'help': 'Number of examples to train the tokenizer on.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} )
lowerCamelCase : Optional[str] = field(
default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} )
lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for code evaluation.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} ) | 687 | 1 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[Union[str, Path]] = None
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : Optional[Dict] = None
lowerCamelCase : Optional[str] = None
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : bool = True
lowerCamelCase : Optional[int] = None
lowerCamelCase : int = 1
lowerCamelCase : Optional[Union[str, bool]] = None
lowerCamelCase : bool = False
lowerCamelCase : Optional[Dict] = None
lowerCamelCase : Optional[str] = None
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
return self.__class__(**{k: copy.deepcopy(_UpperCAmelCase ) for k, v in self.__dict__.items()} ) | 687 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :List[str] = 'ylacombe/bark-small'
_lowerCAmelCase :int = tempfile.mkdtemp()
_lowerCAmelCase :List[str] = 'en_speaker_1'
_lowerCAmelCase :Union[str, Any] = 'This is a test string'
_lowerCAmelCase :List[Any] = 'speaker_embeddings_path.json'
_lowerCAmelCase :str = 'speaker_embeddings'
def SCREAMING_SNAKE_CASE__ ( self: str , **_UpperCAmelCase: Optional[Any] ):
return AutoTokenizer.from_pretrained(self.checkpoint , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :List[Any] = self.get_tokenizer()
_lowerCAmelCase :List[str] = BarkProcessor(tokenizer=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase :List[str] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_lowerCAmelCase :Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_lowerCAmelCase :Any = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_lowerCAmelCase :List[Any] = 35
_lowerCAmelCase :Optional[int] = 2
_lowerCAmelCase :Dict = 8
_lowerCAmelCase :Dict = {
'semantic_prompt': np.ones(_UpperCAmelCase ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_lowerCAmelCase :int = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_lowerCAmelCase :Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Tuple = self.get_tokenizer()
_lowerCAmelCase :Union[str, Any] = BarkProcessor(tokenizer=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = processor(text=self.input_string )
_lowerCAmelCase :List[str] = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() ) | 687 | 1 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def UpperCamelCase_( ):
"""simple docstring"""
print('Making key files...' )
make_key_files('rsa' , 1024 )
print('Key files generation successful.' )
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
print('Generating prime p...' )
_lowerCAmelCase :Any = rabinMiller.generate_large_prime(__magic_name__ )
print('Generating prime q...' )
_lowerCAmelCase :Any = rabinMiller.generate_large_prime(__magic_name__ )
_lowerCAmelCase :List[Any] = p * q
print('Generating e that is relatively prime to (p - 1) * (q - 1)...' )
while True:
_lowerCAmelCase :str = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(__magic_name__ , (p - 1) * (q - 1) ) == 1:
break
print('Calculating d that is mod inverse of e...' )
_lowerCAmelCase :List[str] = cryptoMath.find_mod_inverse(__magic_name__ , (p - 1) * (q - 1) )
_lowerCAmelCase :Optional[Any] = (n, e)
_lowerCAmelCase :Tuple = (n, d)
return (public_key, private_key)
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : int ):
"""simple docstring"""
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print('\nWARNING:' )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
'Use a different name or delete these files and re-run this program.' )
sys.exit()
_lowerCAmelCase , _lowerCAmelCase :Optional[int] = generate_key(__magic_name__ )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" , 'w' ) as out_file:
out_file.write(f"""{key_size},{public_key[0]},{public_key[1]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" , 'w' ) as out_file:
out_file.write(f"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main() | 687 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : int = 'bert'
def __init__( self: Optional[Any] , _UpperCAmelCase: Tuple=3_0522 , _UpperCAmelCase: int=768 , _UpperCAmelCase: Union[str, Any]=12 , _UpperCAmelCase: Dict=12 , _UpperCAmelCase: List[Any]=3072 , _UpperCAmelCase: List[Any]="gelu" , _UpperCAmelCase: Union[str, Any]=0.1 , _UpperCAmelCase: Dict=0.1 , _UpperCAmelCase: List[Any]=512 , _UpperCAmelCase: Optional[Any]=2 , _UpperCAmelCase: Optional[int]=0.0_2 , _UpperCAmelCase: Any=1e-1_2 , _UpperCAmelCase: Optional[Any]=0 , _UpperCAmelCase: Union[str, Any]="absolute" , _UpperCAmelCase: Dict=True , _UpperCAmelCase: Optional[Any]=None , **_UpperCAmelCase: Optional[int] , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :List[Any] = vocab_size
_lowerCAmelCase :Tuple = hidden_size
_lowerCAmelCase :Dict = num_hidden_layers
_lowerCAmelCase :Optional[Any] = num_attention_heads
_lowerCAmelCase :List[Any] = hidden_act
_lowerCAmelCase :int = intermediate_size
_lowerCAmelCase :Tuple = hidden_dropout_prob
_lowerCAmelCase :Tuple = attention_probs_dropout_prob
_lowerCAmelCase :List[Any] = max_position_embeddings
_lowerCAmelCase :Dict = type_vocab_size
_lowerCAmelCase :Any = initializer_range
_lowerCAmelCase :int = layer_norm_eps
_lowerCAmelCase :List[Any] = position_embedding_type
_lowerCAmelCase :int = use_cache
_lowerCAmelCase :Union[str, Any] = classifier_dropout
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
if self.task == "multiple-choice":
_lowerCAmelCase :List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase :Any = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] ) | 687 | 1 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
a = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
a = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
a = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ (datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[int]=4 , _UpperCAmelCase: Optional[int]=False ):
_lowerCAmelCase :Any = compute_bleu(
reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) :Tuple = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
} | 687 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ):
"""simple docstring"""
if isinstance(__magic_name__ , torch.Tensor ):
return image
elif isinstance(__magic_name__ , PIL.Image.Image ):
_lowerCAmelCase :Tuple = [image]
if isinstance(image[0] , PIL.Image.Image ):
_lowerCAmelCase :List[Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
_lowerCAmelCase :Optional[Any] = np.concatenate(__magic_name__ , axis=0 )
_lowerCAmelCase :Any = np.array(__magic_name__ ).astype(np.floataa ) / 255.0
_lowerCAmelCase :Optional[int] = image.transpose(0 , 3 , 1 , 2 )
_lowerCAmelCase :int = 2.0 * image - 1.0
_lowerCAmelCase :Optional[int] = torch.from_numpy(__magic_name__ )
elif isinstance(image[0] , torch.Tensor ):
_lowerCAmelCase :str = torch.cat(__magic_name__ , dim=0 )
return image
def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : int=0.9995 ):
"""simple docstring"""
if not isinstance(__magic_name__ , np.ndarray ):
_lowerCAmelCase :Tuple = True
_lowerCAmelCase :str = va.device
_lowerCAmelCase :List[str] = va.cpu().numpy()
_lowerCAmelCase :List[str] = va.cpu().numpy()
_lowerCAmelCase :Any = np.sum(va * va / (np.linalg.norm(__magic_name__ ) * np.linalg.norm(__magic_name__ )) )
if np.abs(__magic_name__ ) > DOT_THRESHOLD:
_lowerCAmelCase :Optional[Any] = (1 - t) * va + t * va
else:
_lowerCAmelCase :int = np.arccos(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = np.sin(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = theta_a * t
_lowerCAmelCase :str = np.sin(__magic_name__ )
_lowerCAmelCase :Any = np.sin(theta_a - theta_t ) / sin_theta_a
_lowerCAmelCase :Optional[Any] = sin_theta_t / sin_theta_a
_lowerCAmelCase :List[Any] = sa * va + sa * va
if inputs_are_torch:
_lowerCAmelCase :int = torch.from_numpy(__magic_name__ ).to(__magic_name__ )
return va
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase :Any = F.normalize(__magic_name__ , dim=-1 )
_lowerCAmelCase :str = F.normalize(__magic_name__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
for param in model.parameters():
_lowerCAmelCase :List[str] = value
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def __init__( self: Any , _UpperCAmelCase: AutoencoderKL , _UpperCAmelCase: CLIPTextModel , _UpperCAmelCase: CLIPModel , _UpperCAmelCase: CLIPTokenizer , _UpperCAmelCase: UNetaDConditionModel , _UpperCAmelCase: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , _UpperCAmelCase: CLIPFeatureExtractor , _UpperCAmelCase: str=None , _UpperCAmelCase: Tuple=None , _UpperCAmelCase: Union[str, Any]=None , ):
super().__init__()
self.register_modules(
vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , clip_model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , coca_model=_UpperCAmelCase , coca_tokenizer=_UpperCAmelCase , coca_transform=_UpperCAmelCase , )
_lowerCAmelCase :int = (
feature_extractor.size
if isinstance(feature_extractor.size , _UpperCAmelCase )
else feature_extractor.size['shortest_edge']
)
_lowerCAmelCase :Union[str, Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _UpperCAmelCase )
set_requires_grad(self.clip_model , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase :Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
self.enable_attention_slicing(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
set_requires_grad(self.vae , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
set_requires_grad(self.vae , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
set_requires_grad(self.unet , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
set_requires_grad(self.unet , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Dict ):
# get the original timestep using init_timestep
_lowerCAmelCase :Optional[Any] = min(int(num_inference_steps * strength ) , _UpperCAmelCase )
_lowerCAmelCase :List[str] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase :Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Union[str, Any]=None ):
if not isinstance(_UpperCAmelCase , torch.Tensor ):
raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(_UpperCAmelCase )}""" )
_lowerCAmelCase :Union[str, Any] = image.to(device=_UpperCAmelCase , dtype=_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :List[Any] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCAmelCase )
]
_lowerCAmelCase :List[str] = torch.cat(_UpperCAmelCase , dim=0 )
else:
_lowerCAmelCase :List[str] = self.vae.encode(_UpperCAmelCase ).latent_dist.sample(_UpperCAmelCase )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_lowerCAmelCase :List[Any] = 0.1_8_2_1_5 * init_latents
_lowerCAmelCase :List[Any] = init_latents.repeat_interleave(_UpperCAmelCase , dim=0 )
_lowerCAmelCase :Dict = randn_tensor(init_latents.shape , generator=_UpperCAmelCase , device=_UpperCAmelCase , dtype=_UpperCAmelCase )
# get latents
_lowerCAmelCase :Dict = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :List[str] = init_latents
return latents
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Union[str, Any] ):
_lowerCAmelCase :Optional[int] = self.coca_transform(_UpperCAmelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_lowerCAmelCase :Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_lowerCAmelCase :int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: List[str] ):
_lowerCAmelCase :Optional[int] = self.feature_extractor.preprocess(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
_lowerCAmelCase :List[str] = self.clip_model.get_image_features(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase )
_lowerCAmelCase :Dict = image_embeddings_clip.repeat_interleave(_UpperCAmelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , _UpperCAmelCase: Dict , _UpperCAmelCase: str , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , ):
_lowerCAmelCase :Dict = latents.detach().requires_grad_()
_lowerCAmelCase :Optional[Any] = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
_lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_lowerCAmelCase :int = self.scheduler.alphas_cumprod[timestep]
_lowerCAmelCase :Optional[int] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase :str = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_lowerCAmelCase :Optional[Any] = torch.sqrt(_UpperCAmelCase )
_lowerCAmelCase :List[str] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _UpperCAmelCase ):
_lowerCAmelCase :Dict = self.scheduler.sigmas[index]
_lowerCAmelCase :Optional[Any] = latents - sigma * noise_pred
else:
raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_lowerCAmelCase :Tuple = 1 / 0.1_8_2_1_5 * sample
_lowerCAmelCase :Optional[Any] = self.vae.decode(_UpperCAmelCase ).sample
_lowerCAmelCase :List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase :Tuple = transforms.Resize(self.feature_extractor_size )(_UpperCAmelCase )
_lowerCAmelCase :Tuple = self.normalize(_UpperCAmelCase ).to(latents.dtype )
_lowerCAmelCase :List[Any] = self.clip_model.get_image_features(_UpperCAmelCase )
_lowerCAmelCase :List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase )
_lowerCAmelCase :Tuple = spherical_dist_loss(_UpperCAmelCase , _UpperCAmelCase ).mean() * clip_guidance_scale
_lowerCAmelCase :str = -torch.autograd.grad(_UpperCAmelCase , _UpperCAmelCase )[0]
if isinstance(self.scheduler , _UpperCAmelCase ):
_lowerCAmelCase :Union[str, Any] = latents.detach() + grads * (sigma**2)
_lowerCAmelCase :Dict = noise_pred_original
else:
_lowerCAmelCase :Optional[int] = noise_pred_original - torch.sqrt(_UpperCAmelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self: Optional[int] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: float = 0.6 , _UpperCAmelCase: Optional[int] = 50 , _UpperCAmelCase: Optional[float] = 7.5 , _UpperCAmelCase: Optional[int] = 1 , _UpperCAmelCase: float = 0.0 , _UpperCAmelCase: Optional[float] = 100 , _UpperCAmelCase: Optional[torch.Generator] = None , _UpperCAmelCase: Optional[str] = "pil" , _UpperCAmelCase: bool = True , _UpperCAmelCase: float = 0.8 , _UpperCAmelCase: float = 0.1 , _UpperCAmelCase: float = 0.1 , ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(_UpperCAmelCase )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(_UpperCAmelCase , torch.Generator ) and batch_size > 1:
_lowerCAmelCase :int = [generator] + [None] * (batch_size - 1)
_lowerCAmelCase :List[Any] = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
_lowerCAmelCase :Optional[int] = [x[0] for x in coca_is_none if x[1]]
_lowerCAmelCase :List[str] = ', '.join(_UpperCAmelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :List[Any] = self.get_image_description(_UpperCAmelCase )
if style_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :Any = self.get_image_description(_UpperCAmelCase )
# get prompt text embeddings for content and style
_lowerCAmelCase :Any = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :str = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :int = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :Union[str, Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :List[str] = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# duplicate text embeddings for each generation per prompt
_lowerCAmelCase :str = text_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 )
# set timesteps
_lowerCAmelCase :Any = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_lowerCAmelCase :Dict = {}
if accepts_offset:
_lowerCAmelCase :Optional[int] = 1
self.scheduler.set_timesteps(_UpperCAmelCase , **_UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_lowerCAmelCase , _lowerCAmelCase :List[str] = self.get_timesteps(_UpperCAmelCase , _UpperCAmelCase , self.device )
_lowerCAmelCase :int = timesteps[:1].repeat(_UpperCAmelCase )
# Preprocess image
_lowerCAmelCase :Dict = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :int = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :Any = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :str = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if clip_guidance_scale > 0:
_lowerCAmelCase :Optional[Any] = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Dict = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Any = slerp(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase :int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase :Optional[int] = content_text_input.input_ids.shape[-1]
_lowerCAmelCase :Union[str, Any] = self.tokenizer([''] , padding='max_length' , max_length=_UpperCAmelCase , return_tensors='pt' )
_lowerCAmelCase :Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_lowerCAmelCase :Optional[int] = uncond_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase :int = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase :Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase :Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowerCAmelCase :Any = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device='cpu' , dtype=_UpperCAmelCase ).to(
self.device )
else:
_lowerCAmelCase :List[Any] = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
_lowerCAmelCase :int = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase :Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase :Any = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase :Any = {}
if accepts_eta:
_lowerCAmelCase :Any = eta
# check if the scheduler accepts generator
_lowerCAmelCase :List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_lowerCAmelCase :List[Any] = generator
with self.progress_bar(total=_UpperCAmelCase ):
for i, t in enumerate(_UpperCAmelCase ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase :Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase :Tuple = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
_lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase :List[str] = noise_pred.chunk(2 )
_lowerCAmelCase :Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_lowerCAmelCase :List[Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_lowerCAmelCase , _lowerCAmelCase :List[str] = self.cond_fn(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase :str = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_lowerCAmelCase :str = 1 / 0.1_8_2_1_5 * latents
_lowerCAmelCase :Any = self.vae.decode(_UpperCAmelCase ).sample
_lowerCAmelCase :List[str] = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase :Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCAmelCase :List[Any] = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_UpperCAmelCase , nsfw_content_detected=_UpperCAmelCase ) | 687 | 1 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    """Parse the command-line arguments for TPU masked language model training."""
    parser = argparse.ArgumentParser(description='Train a masked language model on TPU.')
    parser.add_argument(
        '--pretrained_model_config', type=str, default='roberta-base', help='The model config to use. Note that we don\'t copy the model\'s weights, only the config!')
    parser.add_argument(
        '--tokenizer', type=str, default='unigram-tokenizer-wikitext', help='The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.')
    parser.add_argument(
        '--per_replica_batch_size', type=int, default=8, help='Batch size per TPU core.')
    parser.add_argument(
        '--no_tpu', action='store_true', help='If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.')
    parser.add_argument(
        '--tpu_name', type=str, default='local', help='Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.')
    parser.add_argument(
        '--tpu_zone', type=str, help='Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.')
    parser.add_argument(
        '--gcp_project', type=str, help='Google cloud project name. Only used for non-Colab TPU nodes.')
    parser.add_argument(
        '--bfloat16', action='store_true', help='Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.')
    parser.add_argument(
        '--train_dataset', type=str, help='Path to training dataset to load. If the path begins with `gs://`'
        ' then the dataset will be loaded from a Google Cloud Storage bucket.')
    parser.add_argument(
        '--shuffle_buffer_size', type=int, default=2**18, help='Size of the shuffle buffer (in samples).')
    parser.add_argument(
        '--eval_dataset', type=str, help='Path to evaluation dataset to load. If the path begins with `gs://`'
        ' then the dataset will be loaded from a Google Cloud Storage bucket.')
    parser.add_argument(
        '--num_epochs', type=int, default=1, help='Number of epochs to train for.')
    parser.add_argument(
        '--learning_rate', type=float, default=1e-4, help='Learning rate to use for training.')
    parser.add_argument(
        '--weight_decay_rate', type=float, default=1e-3, help='Weight decay rate to use for training.')
    parser.add_argument(
        '--max_length', type=int, default=512, help='Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py')
    parser.add_argument(
        '--mlm_probability', type=float, default=0.15, help='Fraction of tokens to mask during training.')
    parser.add_argument('--output_dir', type=str, required=True, help='Path to save model checkpoints to.')
    parser.add_argument('--hub_model_id', type=str, help='Model ID to upload to on the Hugging Face Hub.')
    args = parser.parse_args()
    return args
def UpperCamelCase_( __magic_name__ : Union[str, Any] ):
"""simple docstring"""
try:
if args.tpu_name:
_lowerCAmelCase :List[Any] = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
_lowerCAmelCase :Any = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
'Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '
'--gcp_project. When running on a TPU VM, use --tpu_name local.' )
tf.config.experimental_connect_to_cluster(__magic_name__ )
tf.tpu.experimental.initialize_tpu_system(__magic_name__ )
return tpu
def UpperCamelCase_( __magic_name__ : Tuple ):
"""simple docstring"""
_lowerCAmelCase :List[str] = 0
for file in file_list:
_lowerCAmelCase :Tuple = file.split('/' )[-1]
_lowerCAmelCase :Any = re.search(r'-\d+-(\d+)\.tfrecord' , __magic_name__ ).group(1 )
_lowerCAmelCase :List[str] = int(__magic_name__ )
num_samples += sample_count
return num_samples
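# Example shard name the regex above expects (hypothetical file):
#   "train-00003-01024.tfrecord" -> contributes 1024 to num_samples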
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : int=None ):
"""simple docstring"""
_lowerCAmelCase :str = count_samples(__magic_name__ )
_lowerCAmelCase :Any = tf.data.Dataset.from_tensor_slices(__magic_name__ )
if shuffle:
_lowerCAmelCase :Union[str, Any] = dataset.shuffle(len(__magic_name__ ) )
_lowerCAmelCase :Union[str, Any] = tf.data.TFRecordDataset(__magic_name__ , num_parallel_reads=__magic_name__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
_lowerCAmelCase :Dict = dataset.apply(tf.data.experimental.assert_cardinality(__magic_name__ ) )
_lowerCAmelCase :Any = dataset.map(__magic_name__ , num_parallel_calls=__magic_name__ )
if shuffle:
assert shuffle_buffer_size is not None
_lowerCAmelCase :Dict = dataset.shuffle(args.shuffle_buffer_size )
_lowerCAmelCase :Optional[Any] = dataset.batch(__magic_name__ , drop_remainder=__magic_name__ )
_lowerCAmelCase :Optional[int] = dataset.map(__magic_name__ , num_parallel_calls=__magic_name__ )
_lowerCAmelCase :str = dataset.prefetch(__magic_name__ )
return dataset
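# Note: shuffling happens twice above on purpose, a cheap shuffle of the file order
# before the interleaved TFRecord reads, then a sample-level shuffle buffer after decoding.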
def UpperCamelCase_( __magic_name__ : Optional[Any] ):
"""simple docstring"""
if not args.no_tpu:
_lowerCAmelCase :str = initialize_tpu(__magic_name__ )
_lowerCAmelCase :Any = tf.distribute.TPUStrategy(__magic_name__ )
else:
_lowerCAmelCase :Optional[int] = tf.distribute.OneDeviceStrategy(device='/gpu:0' )
    if args.bfloat16:
tf.keras.mixed_precision.set_global_policy('mixed_bfloat16' )
_lowerCAmelCase :Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer )
_lowerCAmelCase :int = AutoConfig.from_pretrained(args.pretrained_model_config )
_lowerCAmelCase :Optional[Any] = tokenizer.vocab_size
_lowerCAmelCase :Dict = tf.io.gfile.glob(os.path.join(args.train_dataset , '*.tfrecord' ) )
if not training_records:
raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" )
_lowerCAmelCase :Union[str, Any] = tf.io.gfile.glob(os.path.join(args.eval_dataset , '*.tfrecord' ) )
if not eval_records:
raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" )
_lowerCAmelCase :Any = count_samples(__magic_name__ )
_lowerCAmelCase :Optional[int] = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
_lowerCAmelCase :Optional[Any] = steps_per_epoch * args.num_epochs
with strategy.scope():
_lowerCAmelCase :Tuple = TFAutoModelForMaskedLM.from_config(__magic_name__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
_lowerCAmelCase , _lowerCAmelCase :Tuple = create_optimizer(
num_train_steps=__magic_name__ , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=__magic_name__ , metrics=['accuracy'] )
def decode_fn(__magic_name__ : Tuple ):
_lowerCAmelCase :List[Any] = {
'input_ids': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
'attention_mask': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(__magic_name__ , __magic_name__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
_lowerCAmelCase :Union[str, Any] = DataCollatorForLanguageModeling(
tokenizer=__magic_name__ , mlm_probability=args.mlm_probability , mlm=__magic_name__ , return_tensors='tf' )
def mask_with_collator(__magic_name__ : List[str] ):
# TF really needs an isin() function
_lowerCAmelCase :int = (
~tf.cast(batch['attention_mask'] , tf.bool )
| (batch['input_ids'] == tokenizer.cls_token_id)
| (batch['input_ids'] == tokenizer.sep_token_id)
)
_lowerCAmelCase , _lowerCAmelCase :Tuple = data_collator.tf_mask_tokens(
batch['input_ids'] , vocab_size=len(__magic_name__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=__magic_name__ , )
return batch
_lowerCAmelCase :Optional[Any] = args.per_replica_batch_size * strategy.num_replicas_in_sync
_lowerCAmelCase :int = prepare_dataset(
__magic_name__ , decode_fn=__magic_name__ , mask_fn=__magic_name__ , batch_size=__magic_name__ , shuffle=__magic_name__ , shuffle_buffer_size=args.shuffle_buffer_size , )
_lowerCAmelCase :int = prepare_dataset(
__magic_name__ , decode_fn=__magic_name__ , mask_fn=__magic_name__ , batch_size=__magic_name__ , shuffle=__magic_name__ , )
_lowerCAmelCase :int = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=__magic_name__ ) )
model.fit(
__magic_name__ , validation_data=__magic_name__ , epochs=args.num_epochs , callbacks=__magic_name__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
a = parse_args()
    main(args)
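# Example invocation (bucket paths and script name are hypothetical; flags match the
# argparse definitions above):
#   python run_mlm_tpu.py --train_dataset gs://my-bucket/train --eval_dataset gs://my-bucket/eval \
#       --output_dir ./mlm-output --bfloat16 --num_epochs 1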
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :Optional[int] = list(__magic_name__ )
_lowerCAmelCase :Dict = list(__magic_name__ )
_lowerCAmelCase :Any = 0
for i in range(len(__magic_name__ ) ):
if lista[i] != lista[i]:
count += 1
_lowerCAmelCase :Union[str, Any] = '_'
if count > 1:
return False
else:
return "".join(__magic_name__ )
def UpperCamelCase_( __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :int = []
while True:
_lowerCAmelCase :str = ['$'] * len(__magic_name__ )
_lowerCAmelCase :Optional[int] = []
for i in range(len(__magic_name__ ) ):
for j in range(i + 1 , len(__magic_name__ ) ):
_lowerCAmelCase :int = compare_string(binary[i] , binary[j] )
if k is False:
_lowerCAmelCase :str = '*'
_lowerCAmelCase :Union[str, Any] = '*'
temp.append('X' )
for i in range(len(__magic_name__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(__magic_name__ ) == 0:
return pi
_lowerCAmelCase :Any = list(set(__magic_name__ ) )
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : Sequence[float] ):
"""simple docstring"""
_lowerCAmelCase :str = []
for minterm in minterms:
_lowerCAmelCase :Any = ''
for _ in range(__magic_name__ ):
_lowerCAmelCase :Tuple = str(minterm % 2 ) + string
minterm //= 2
temp.append(__magic_name__ )
return temp
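# Example with integer minterms: decimal_to_binary(3, [5]) -> ["101"].
# Note that `main` below reads minterms as floats, which would produce tokens like
# "1.0" here; integer-valued inputs are assumed for this example.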
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str , __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = list(__magic_name__ )
_lowerCAmelCase :List[Any] = list(__magic_name__ )
_lowerCAmelCase :Optional[Any] = 0
for i in range(len(__magic_name__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
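# Example: is_for_table("0_1", "001", 1) -> True (the strings differ only at the "_" slot)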
def UpperCamelCase_( __magic_name__ : list[list[int]] , __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :str = []
_lowerCAmelCase :List[str] = [0] * len(__magic_name__ )
for i in range(len(chart[0] ) ):
_lowerCAmelCase :Dict = 0
_lowerCAmelCase :Optional[Any] = -1
for j in range(len(__magic_name__ ) ):
if chart[j][i] == 1:
count += 1
_lowerCAmelCase :List[Any] = j
if count == 1:
_lowerCAmelCase :Dict = 1
for i in range(len(__magic_name__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(__magic_name__ ) ):
_lowerCAmelCase :Dict = 0
temp.append(prime_implicants[i] )
while True:
_lowerCAmelCase :Dict = 0
_lowerCAmelCase :Any = -1
_lowerCAmelCase :Optional[Any] = 0
for i in range(len(__magic_name__ ) ):
_lowerCAmelCase :str = chart[i].count(1 )
if count_n > max_n:
_lowerCAmelCase :Optional[Any] = count_n
_lowerCAmelCase :Dict = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(__magic_name__ ) ):
_lowerCAmelCase :str = 0
def UpperCamelCase_( __magic_name__ : list[str] , __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :str = [[0 for x in range(len(__magic_name__ ) )] for x in range(len(__magic_name__ ) )]
for i in range(len(__magic_name__ ) ):
_lowerCAmelCase :Tuple = prime_implicants[i].count('_' )
for j in range(len(__magic_name__ ) ):
if is_for_table(prime_implicants[i] , binary[j] , __magic_name__ ):
_lowerCAmelCase :str = 1
return chart
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :Tuple = int(input('Enter the no. of variables\n' ) )
_lowerCAmelCase :Tuple = [
float(__magic_name__ )
for x in input(
'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
]
_lowerCAmelCase :List[str] = decimal_to_binary(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Any = check(__magic_name__ )
print('Prime Implicants are:' )
print(__magic_name__ )
_lowerCAmelCase :List[Any] = prime_implicant_chart(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Tuple = selection(__magic_name__ , __magic_name__ )
print('Essential Prime Implicants are:' )
print(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
    main()
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_abit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
a = logging.getLogger(__name__)
def UpperCamelCase_( __magic_name__ : torch.nn.Module , __magic_name__ : BnbQuantizationConfig , __magic_name__ : Union[str, os.PathLike] = None , __magic_name__ : Optional[Dict[str, Union[int, str, torch.device]]] = None , __magic_name__ : Optional[List[str]] = None , __magic_name__ : Optional[Dict[Union[int, str], Union[int, str]]] = None , __magic_name__ : Optional[Union[str, os.PathLike]] = None , __magic_name__ : bool = False , ):
"""simple docstring"""
_lowerCAmelCase :Union[str, Any] = bnb_quantization_config.load_in_abit
_lowerCAmelCase :Union[str, Any] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
' make sure you have the latest version of `bitsandbytes` installed.' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
'make sure you have the latest version of `bitsandbytes` installed.' )
_lowerCAmelCase :List[Any] = []
# custom device map
if isinstance(__magic_name__ , __magic_name__ ) and len(device_map.keys() ) > 1:
_lowerCAmelCase :Optional[Any] = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
_lowerCAmelCase :str = get_keys_to_not_convert(__magic_name__ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(__magic_name__ )
_lowerCAmelCase :int = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
_lowerCAmelCase :List[str] = []
_lowerCAmelCase :Optional[Any] = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(__magic_name__ )
# compatibility with peft
_lowerCAmelCase :Union[str, Any] = load_in_abit
_lowerCAmelCase :int = load_in_abit
_lowerCAmelCase :List[str] = get_parameter_device(__magic_name__ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
_lowerCAmelCase :List[str] = replace_with_bnb_layers(__magic_name__ , __magic_name__ , modules_to_not_convert=__magic_name__ )
# convert param to the right dtype
_lowerCAmelCase :Optional[Any] = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
_lowerCAmelCase :Dict = name.replace('.weight' , '' ).replace('.bias' , '' )
_lowerCAmelCase :Tuple = getattr(__magic_name__ , __magic_name__ , __magic_name__ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(__magic_name__ ):
param.to(__magic_name__ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
            ' We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
_lowerCAmelCase :Dict = replace_with_bnb_layers(
__magic_name__ , __magic_name__ , modules_to_not_convert=__magic_name__ )
_lowerCAmelCase :Optional[Any] = get_quantized_model_device_map(
__magic_name__ , __magic_name__ , __magic_name__ , max_memory=__magic_name__ , no_split_module_classes=__magic_name__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
_lowerCAmelCase :Any = True
_lowerCAmelCase :Tuple = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
__magic_name__ , __magic_name__ , __magic_name__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=__magic_name__ , offload_state_dict=__magic_name__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(__magic_name__ , device_map=__magic_name__ , offload_dir=__magic_name__ )
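# Minimal usage sketch (the function and keyword names are assumptions based on the
# upstream `accelerate` API this file appears to mirror; `weights_location` is the name
# used in the error message above):
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   model = load_and_quantize_model(model, bnb_config, weights_location="path/to/ckpt")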
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any]=None , __magic_name__ : List[Any]=None , __magic_name__ : List[str]=None ):
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
_lowerCAmelCase :Union[str, Any] = {'': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info('The device_map was not initialized. ' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(__magic_name__ , __magic_name__ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
_lowerCAmelCase :Dict = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
_lowerCAmelCase :List[str] = {}
_lowerCAmelCase :List[str] = special_dtypes
_lowerCAmelCase :Tuple = no_split_module_classes
_lowerCAmelCase :Optional[Any] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
_lowerCAmelCase :Optional[int] = get_balanced_memory(
__magic_name__ , low_zero=(device_map == 'balanced_low_0') , max_memory=__magic_name__ , **__magic_name__ , )
_lowerCAmelCase :Tuple = max_memory
_lowerCAmelCase :str = infer_auto_device_map(__magic_name__ , **__magic_name__ )
if isinstance(__magic_name__ , __magic_name__ ):
# check if don't have any quantized module on the cpu
_lowerCAmelCase :Dict = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
_lowerCAmelCase :Tuple = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
logger.info(
                    'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : str=None , __magic_name__ : int=None ):
"""simple docstring"""
if modules_to_not_convert is None:
_lowerCAmelCase :Dict = []
_lowerCAmelCase , _lowerCAmelCase :List[Any] = _replace_with_bnb_layers(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' This can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def UpperCamelCase_( __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Union[str, Any]=None , __magic_name__ : str=None , ):
"""simple docstring"""
_lowerCAmelCase :Any = False
for name, module in model.named_children():
if current_key_name is None:
_lowerCAmelCase :Dict = []
current_key_name.append(__magic_name__ )
if isinstance(__magic_name__ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
_lowerCAmelCase :Dict = '.'.join(__magic_name__ )
_lowerCAmelCase :Tuple = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
_lowerCAmelCase :Tuple = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
_lowerCAmelCase :Any = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__magic_name__ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
_lowerCAmelCase :Tuple = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
_lowerCAmelCase :int = module.weight.data
if module.bias is not None:
_lowerCAmelCase :str = module.bias.data
bnb_module.requires_grad_(__magic_name__ )
setattr(__magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :int = True
if len(list(module.children() ) ) > 0:
_lowerCAmelCase , _lowerCAmelCase :Any = _replace_with_bnb_layers(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :str = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
with init_empty_weights():
_lowerCAmelCase :str = deepcopy(__magic_name__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
_lowerCAmelCase :Optional[Any] = find_tied_parameters(__magic_name__ )
# For compatibility with Accelerate < 0.18
if isinstance(__magic_name__ , __magic_name__ ):
_lowerCAmelCase :int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
_lowerCAmelCase :Union[str, Any] = sum(__magic_name__ , [] )
_lowerCAmelCase :Any = len(__magic_name__ ) > 0
# Check if it is a base model
_lowerCAmelCase :List[Any] = False
if hasattr(__magic_name__ , 'base_model_prefix' ):
_lowerCAmelCase :str = not hasattr(__magic_name__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
_lowerCAmelCase :Tuple = list(model.named_children() )
_lowerCAmelCase :Optional[int] = [list_modules[-1][0]]
# add last module together with tied weights
_lowerCAmelCase :Tuple = set(__magic_name__ ) - set(__magic_name__ )
_lowerCAmelCase :Any = list(set(__magic_name__ ) ) + list(__magic_name__ )
# remove ".weight" from the keys
_lowerCAmelCase :Optional[int] = ['.weight', '.bias']
_lowerCAmelCase :Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
_lowerCAmelCase :List[Any] = name.replace(__magic_name__ , '' )
filtered_module_names.append(__magic_name__ )
return filtered_module_names
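# For a typical causal LM with a tied lm_head, the returned list looks like ["lm_head"],
# i.e. the head/tied modules to keep in full precision (illustrative, not from the source).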
def UpperCamelCase_( __magic_name__ : Any ):
"""simple docstring"""
for m in model.modules():
if isinstance(__magic_name__ , bnb.nn.Linearabit ):
return True
return False
def UpperCamelCase_( __magic_name__ : nn.Module ):
"""simple docstring"""
return next(parameter.parameters() ).device
def UpperCamelCase_( __magic_name__ : Any , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Tuple ):
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(__magic_name__ , __magic_name__ , 0 , dtype=__magic_name__ , value=__magic_name__ )
_lowerCAmelCase :List[Any] = param_name
_lowerCAmelCase :Dict = model
if "." in tensor_name:
_lowerCAmelCase :List[Any] = tensor_name.split('.' )
for split in splits[:-1]:
_lowerCAmelCase :int = getattr(__magic_name__ , __magic_name__ )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
_lowerCAmelCase :Tuple = new_module
_lowerCAmelCase :Optional[Any] = splits[-1]
# offload weights
_lowerCAmelCase :Union[str, Any] = False
offload_weight(module._parameters[tensor_name] , __magic_name__ , __magic_name__ , index=__magic_name__ )
if hasattr(module._parameters[tensor_name] , 'SCB' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , __magic_name__ , index=__magic_name__ , )
else:
offload_weight(__magic_name__ , __magic_name__ , __magic_name__ , index=__magic_name__ )
offload_weight(__magic_name__ , param_name.replace('weight' , 'SCB' ) , __magic_name__ , index=__magic_name__ )
    set_module_tensor_to_device(__magic_name__ , __magic_name__ , 'meta' , dtype=__magic_name__ , value=torch.empty(*param.size() ) )
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
a = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
a = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account.
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
a = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ (datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[int]=4 , _UpperCAmelCase: Optional[int]=False ):
_lowerCAmelCase :Any = compute_bleu(
reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) :Tuple = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
        }
def UpperCamelCase_( __magic_name__ : list ):
"""simple docstring"""
_lowerCAmelCase :int = False
    while is_sorted is False:  # keep looping until a full pass completes with no swaps
_lowerCAmelCase :Optional[Any] = True
for i in range(0 , len(__magic_name__ ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
_lowerCAmelCase , _lowerCAmelCase :List[str] = input_list[i + 1], input_list[i]
# swapping if elements not in order
_lowerCAmelCase :Optional[int] = False
for i in range(1 , len(__magic_name__ ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
_lowerCAmelCase , _lowerCAmelCase :Union[str, Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
_lowerCAmelCase :Optional[int] = False
return input_list
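# Example: odd_even_sort([5, 3, 1, 2]) -> [1, 2, 3, 5]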
if __name__ == "__main__":
print("""Enter list to be sorted""")
a = [int(x) for x in input().split()]
# inputing elements of the list in one line
a = odd_even_sort(input_list)
print("""The sorted list is""")
    print(sorted_list)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
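# Note: with the _LazyModule pattern above, torch-dependent classes are imported only on
# first attribute access at runtime, while the TYPE_CHECKING branch keeps static type
# checkers and IDEs aware of the real symbols.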
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :Optional[int] = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
_lowerCAmelCase :Tuple = DetaConfig(
backbone_config=__magic_name__ , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=__magic_name__ , with_box_refine=__magic_name__ , two_stage=__magic_name__ , )
# set labels
_lowerCAmelCase :int = 'huggingface/label-files'
if "o365" in model_name:
_lowerCAmelCase :Optional[int] = 366
_lowerCAmelCase :str = 'object365-id2label.json'
else:
_lowerCAmelCase :Dict = 91
_lowerCAmelCase :Dict = 'coco-detection-id2label.json'
_lowerCAmelCase :Dict = num_labels
_lowerCAmelCase :Any = json.load(open(cached_download(hf_hub_url(__magic_name__ , __magic_name__ , repo_type='dataset' ) ) , 'r' ) )
_lowerCAmelCase :Union[str, Any] = {int(__magic_name__ ): v for k, v in idalabel.items()}
_lowerCAmelCase :Any = idalabel
_lowerCAmelCase :Dict = {v: k for k, v in idalabel.items()}
return config
def UpperCamelCase_( __magic_name__ : Optional[Any] ):
"""simple docstring"""
_lowerCAmelCase :Optional[int] = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.reduction.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.bias""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", f"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", f"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", f"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", f"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", f"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", f"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.weight""", f"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.weight""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.weight""", f"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.bias""", f"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : List[str] ):
"""simple docstring"""
_lowerCAmelCase :List[str] = dct.pop(__magic_name__ )
_lowerCAmelCase :List[str] = val
def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :Any = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_lowerCAmelCase :Tuple = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_lowerCAmelCase :Optional[int] = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight""" )
_lowerCAmelCase :Tuple = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase :List[str] = in_proj_weight[:dim, :]
_lowerCAmelCase :Tuple = in_proj_bias[: dim]
_lowerCAmelCase :Optional[int] = in_proj_weight[
dim : dim * 2, :
]
_lowerCAmelCase :Optional[int] = in_proj_bias[
dim : dim * 2
]
_lowerCAmelCase :int = in_proj_weight[
-dim :, :
]
_lowerCAmelCase :str = in_proj_bias[-dim :]
# fmt: on
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Dict ):
"""simple docstring"""
_lowerCAmelCase :Union[str, Any] = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
_lowerCAmelCase :int = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
_lowerCAmelCase :Dict = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase :int = in_proj_weight[:hidden_size, :]
_lowerCAmelCase :Any = in_proj_bias[:hidden_size]
_lowerCAmelCase :Tuple = in_proj_weight[
hidden_size : hidden_size * 2, :
]
_lowerCAmelCase :str = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCAmelCase :Any = in_proj_weight[-hidden_size:, :]
_lowerCAmelCase :List[str] = in_proj_bias[-hidden_size:]
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowerCAmelCase :List[Any] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Any ):
"""simple docstring"""
_lowerCAmelCase :Union[str, Any] = get_deta_config(__magic_name__ )
# load original state dict
if model_name == "deta-swin-large":
_lowerCAmelCase :Union[str, Any] = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
_lowerCAmelCase :str = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
_lowerCAmelCase :int = torch.load(__magic_name__ , map_location='cpu' )['model']
    # print the original state dict entries (name, shape) for inspection
for name, param in state_dict.items():
print(__magic_name__ , param.shape )
# rename keys
_lowerCAmelCase :List[Any] = create_rename_keys(__magic_name__ )
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
read_in_swin_q_k_v(__magic_name__ , config.backbone_config )
read_in_decoder_q_k_v(__magic_name__ , __magic_name__ )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
_lowerCAmelCase :Dict = state_dict.pop(__magic_name__ )
_lowerCAmelCase :Tuple = val
if "input_proj" in key:
_lowerCAmelCase :List[str] = state_dict.pop(__magic_name__ )
_lowerCAmelCase :Optional[Any] = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
_lowerCAmelCase :List[Any] = state_dict.pop(__magic_name__ )
_lowerCAmelCase :List[Any] = val
# finally, create HuggingFace model and load state dict
_lowerCAmelCase :Union[str, Any] = DetaForObjectDetection(__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
_lowerCAmelCase :str = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(__magic_name__ )
# load image processor
_lowerCAmelCase :Dict = DetaImageProcessor(format='coco_detection' )
# verify our conversion on image
_lowerCAmelCase :str = prepare_img()
_lowerCAmelCase :List[Any] = processor(images=__magic_name__ , return_tensors='pt' )
_lowerCAmelCase :str = encoding['pixel_values']
_lowerCAmelCase :Tuple = model(pixel_values.to(__magic_name__ ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
_lowerCAmelCase :Tuple = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
_lowerCAmelCase :List[str] = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
_lowerCAmelCase :Union[str, Any] = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
_lowerCAmelCase :Union[str, Any] = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__magic_name__ ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__magic_name__ ) , atol=1e-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""" )
        Path(__magic_name__ ).mkdir(exist_ok=True )
model.save_pretrained(__magic_name__ )
processor.save_pretrained(__magic_name__ )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(f"""jozhang97/{model_name}""" )
processor.push_to_hub(f"""jozhang97/{model_name}""" )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
type=str,
default="""deta-swin-large""",
choices=["""deta-swin-large""", """deta-swin-large-o365"""],
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
help="""Path to the folder to output PyTorch model.""",
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
a = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
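# Example invocation (script name and output path are hypothetical; flags match the
# argparse definitions above):
#   python convert_deta_to_pytorch.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large --push_to_hub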
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self: str , _UpperCAmelCase: str , _UpperCAmelCase: Optional[int]=7 , _UpperCAmelCase: Union[str, Any]=3 , _UpperCAmelCase: int=18 , _UpperCAmelCase: List[Any]=30 , _UpperCAmelCase: List[Any]=400 , _UpperCAmelCase: Optional[Any]=True , _UpperCAmelCase: Any=None , _UpperCAmelCase: Any=True , _UpperCAmelCase: int=None , _UpperCAmelCase: Union[str, Any]=True , ):
_lowerCAmelCase :Tuple = size if size is not None else {'shortest_edge': 20}
_lowerCAmelCase :str = crop_size if crop_size is not None else {'height': 18, 'width': 18}
_lowerCAmelCase :str = parent
_lowerCAmelCase :List[Any] = batch_size
_lowerCAmelCase :Optional[Any] = num_channels
_lowerCAmelCase :Optional[Any] = image_size
_lowerCAmelCase :int = min_resolution
_lowerCAmelCase :List[str] = max_resolution
_lowerCAmelCase :List[str] = do_resize
_lowerCAmelCase :Optional[int] = size
_lowerCAmelCase :str = do_center_crop
_lowerCAmelCase :int = crop_size
_lowerCAmelCase :Optional[int] = do_flip_channel_order
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Any = MobileViTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = MobileViTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self: str ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'center_crop' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_flip_channel_order' ) )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
_lowerCAmelCase :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self: int ):
# Initialize image_processing
_lowerCAmelCase :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
_lowerCAmelCase :Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :str = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
# Initialize image_processing
_lowerCAmelCase :int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
_lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :List[str] = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
# Initialize image_processing
_lowerCAmelCase :Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
_lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :int = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
        ) , )
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two minterm strings if they differ in exactly one position."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge adjacent terms; terms that never merge are prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # Both terms were absorbed into the merged term `k`.
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # First pass: essential prime implicants (the sole cover of some minterm column).
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Second pass: greedily pick implicants covering the most remaining minterms.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
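# Illustrative, non-interactive run of the pipeline above. The minterms are
# example values chosen for the demo; they are not part of the original module.
def demo_quine_mccluskey() -> None:
    minterms = [0, 1, 2, 3]                  # f(a, b) is 1 for every input
    binary = decimal_to_binary(2, minterms)  # ['00', '01', '10', '11']
    prime_implicants = check(binary)         # ['__'] -- every term merges away
    chart = prime_implicant_chart(prime_implicants, binary)
    print(selection(chart, prime_implicants))  # ['__'], i.e. the constant function 1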
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
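# Illustrative usage sketch: this builder backs the packaged "pandas" loader in
# 🤗 Datasets, which reads DataFrames serialised with DataFrame.to_pickle().
# The file name is an assumption for the example.
from datasets import load_dataset

dataset = load_dataset("pandas", data_files={"train": "my_frame.pkl"}, split="train")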
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
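# Illustrative usage sketch: the tool is callable like any PipelineTool. The
# image path and question are assumptions for the example.
from PIL import Image

document_qa = DocumentQuestionAnsweringTool()
document = Image.open("invoice.png")
print(document_qa(document, "What is the purchase amount?"))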
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

# Flip images and their YOLO-format bounding boxes; fill these in before running.
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
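# Illustrative usage sketch: build a small, randomly initialised GPT-1 style
# model from the config. The sizes are assumptions chosen for the example.
from transformers import OpenAIGPTConfig, OpenAIGPTModel

config = OpenAIGPTConfig(n_layer=2, n_head=4, n_embd=128)
model = OpenAIGPTModel(config)
print(model.config.hidden_size)  # 128 -- resolved through the attribute_map alias for n_embd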
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
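# Illustrative usage sketch: the checker is normally loaded alongside a Stable
# Diffusion pipeline; the repo id below is the commonly used public checkpoint.
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker

safety_checker = StableDiffusionSafetyChecker.from_pretrained(
    "CompVis/stable-diffusion-safety-checker"
)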