code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 82–54.1k | int64 0–699 | stringlengths 111–35.6k | int64 0–699 | int64 0–1
import unittest

import numpy as np

from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor


class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def _expected_shapes(self):
        # Helper: (unbatched, batched) pixel-value shapes implied by the tester config.
        tester = self.image_processor_tester
        single = (1, tester.num_channels, tester.size["height"], tester.size["width"])
        return single, (tester.batch_size,) + single[1:]

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        single_shape, batched_shape = self._expected_shapes()
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, single_shape)
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, batched_shape)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        single_shape, batched_shape = self._expected_shapes()
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, single_shape)
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, batched_shape)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        single_shape, batched_shape = self._expected_shapes()
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, single_shape)
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, batched_shape)
| 686 |
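For orientation, a minimal standalone usage sketch of the processor exercised above. It assumes torch, Pillow and a transformers version that ships `DPTImageProcessor`; the 18×18 size mirrors the tester defaults rather than any real checkpoint.

```python
# A sketch, not part of the test suite: resize a blank 30x30 image to the
# tester's 18x18 target and check the resulting tensor shape.
from PIL import Image
from transformers import DPTImageProcessor

processor = DPTImageProcessor(do_resize=True, size={"height": 18, "width": 18})
pixel_values = processor(Image.new("RGB", (30, 30)), return_tensors="pt").pixel_values
assert pixel_values.shape == (1, 3, 18, 18)
```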
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 686 | 1 |
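For reference, a self-contained Prim's implementation over the same adjacency-list format (`{node: [[neighbor, cost], ...]}`). This is a sketch, not the imported `prisms_algorithm`, and the function name is ours:

```python
import heapq


def prim_mst_edges(adjacency: dict, start: int = 0) -> set:
    """Grow an MST greedily from `start`; returns the tree as a set of (u, v) edges."""
    visited = {start}
    heap = [(cost, start, to) for to, cost in adjacency[start]]
    heapq.heapify(heap)
    tree_edges = set()
    while heap:
        cost, frm, to = heapq.heappop(heap)
        if to in visited:  # skip edges that would close a cycle
            continue
        visited.add(to)
        tree_edges.add((frm, to))
        for nxt, nxt_cost in adjacency[to]:
            if nxt not in visited:
                heapq.heappush(heap, (nxt_cost, to, nxt))
    return tree_edges
```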
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
| 686 |
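The memoized recursion above can be cross-checked by brute force for small day counts: a valid prize string over {O, L, A} has at most one absence and never three consecutive lates (the 4-day count, 43, is given in the Project Euler 191 statement). The helper name below is ours:

```python
from itertools import product


def brute_force_prize_strings(days: int) -> int:
    # Enumerate every attendance string and keep the ones that break no rule.
    return sum(
        1
        for s in map("".join, product("OLA", repeat=days))
        if s.count("A") <= 1 and "LLL" not in s
    )


assert brute_force_prize_strings(4) == _calculate(4, absent=0, late=0) == 43
```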
from ..utils import DummyObject, requires_backends


# Note: the original class name was stripped by the style transformation; the
# backend list matches diffusers' dummy object for the spectrogram diffusion
# pipeline, so that name is used here.
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 686 | 1 |
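The dummy-object pattern works because `requires_backends` raises an informative `ImportError` whenever one of the listed backends is missing. A simplified sketch of that helper (not diffusers' exact implementation):

```python
import importlib.util


def requires_backends(obj, backends):
    # Raise an informative error naming whichever backends are not installed.
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the following libraries: {', '.join(missing)}")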
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def a_ ( __lowercase : Any ) -> List[Any]:
_snake_case = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def a_ ( __lowercase : Dict ) -> Tuple:
_snake_case , _snake_case = emb.weight.shape
_snake_case = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
_snake_case = emb.weight.data
return lin_layer
def a_ ( __lowercase : Optional[int] , __lowercase : Union[str, Any]=None ) -> Tuple:
_snake_case = {}
for old_key in state_dict.keys():
_snake_case = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
_snake_case = key.replace('moe_layer.experts.0' , f'''ffn.experts.expert_{expert_idx}''' )
else:
_snake_case = key.replace('moe_layer.experts.' , 'ffn.experts.expert_' )
if "gate" in key:
_snake_case = key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier' )
if "fc2" and "experts" not in key:
_snake_case = key.replace('.fc2.' , '.ffn.fc2.' )
if "fc1" and "experts" not in key:
_snake_case = key.replace('.fc1.' , '.ffn.fc1.' )
if ".encoder_attn." in key:
_snake_case = key.replace('.encoder_attn.' , '.cross_attention.' )
if "encoder_attn_layer_norm" in key:
_snake_case = key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm' )
if "final_layer_norm" in key:
_snake_case = key.replace('final_layer_norm' , 'ff_layer_norm' )
_snake_case = state_dict[old_key]
return new_dict
def a_ ( __lowercase : Optional[Any] , __lowercase : Tuple , __lowercase : Any , __lowercase : List[str] , __lowercase : str = WEIGHTS_NAME ) -> Union[str, Any]:
_snake_case = []
_snake_case = 0
os.makedirs(__lowercase , exist_ok=__lowercase )
for expert in range(__lowercase ):
_snake_case = switch_checkpoint_path + f'''-rank-{expert}.pt'''
if os.path.isfile(__lowercase ):
_snake_case = torch.load(__lowercase )['model']
remove_ignore_keys_(__lowercase )
_snake_case = rename_fairseq_keys(__lowercase , __lowercase )
_snake_case = os.path.join(
__lowercase , weights_name.replace('.bin' , f'''-{len(__lowercase )+1:05d}-of-???.bin''' ) )
torch.save(__lowercase , __lowercase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__lowercase )[0]].dtype )
# Add the last block
_snake_case = os.path.join(__lowercase , weights_name.replace('.bin' , f'''-{len(__lowercase )+1:05d}-of-???.bin''' ) )
_snake_case = torch.load(switch_checkpoint_path + '-shared.pt' )['model']
remove_ignore_keys_(__lowercase )
_snake_case = rename_fairseq_keys(__lowercase , __lowercase )
_snake_case = shared_weights['decoder.embed_tokens.weight']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__lowercase ) == 1:
_snake_case = os.path.join(__lowercase , __lowercase )
torch.save(__lowercase , __lowercase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__lowercase , __lowercase )
# Otherwise, let's build the index
_snake_case = {}
for idx, shard in enumerate(__lowercase ):
_snake_case = weights_name.replace('.bin' , f'''-{idx+1:05d}-of-{len(__lowercase ):05d}.bin''' )
_snake_case = os.path.join(__lowercase , weights_name.replace('.bin' , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__lowercase , os.path.join(__lowercase , __lowercase ) )
for key in shard:
_snake_case = shard_file
# Add the metadata
_snake_case = {'total_size': total_size}
_snake_case = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(__lowercase , __lowercase ) , 'w' , encoding='utf-8' ) as f:
_snake_case = json.dumps(__lowercase , indent=2 , sort_keys=__lowercase ) + '\n'
f.write(__lowercase )
return metadata, index
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
_lowerCamelCase : List[str] = parser.parse_args()
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
_lowerCamelCase : Tuple = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
_lowerCamelCase : Dict = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path)
| 686 |
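The index written at the end of `shard_on_the_fly` follows the standard sharded-checkpoint layout: a `metadata` block with the total byte size and a `weight_map` from parameter name to shard file. Schematically (the weight names and sizes below are illustrative, not taken from a real NLLB-MoE checkpoint):

```python
# Schematic shape of the {WEIGHTS_INDEX_NAME} JSON produced above.
index = {
    "metadata": {"total_size": 123_456_789},
    "weight_map": {
        "decoder.embed_tokens.weight": "pytorch_model-00001-of-00002.bin",
        "ffn.experts.expert_0.fc1.weight": "pytorch_model-00002-of-00002.bin",
    },
}
```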
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 686 | 1 |
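One way the CONNECTION_TIMES_OUT mode can be emulated is by monkeypatching `requests.Session.request`: calls without a timeout "hang" (raise immediately with a stand-in error), while calls with a timeout fail fast. This is a sketch of the idea, not the `datasets` implementation, and the names are ours:

```python
from contextlib import contextmanager
from unittest.mock import patch

import requests


class RequestWouldHangIndefinitelyError(Exception):
    """Stand-in for the error raised when a call without a timeout would hang."""


@contextmanager
def simulate_connection_times_out():
    def timed_out(self, method, url, **kwargs):
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(f"Tried a call to {url} in offline mode.")
        raise requests.exceptions.ConnectTimeout(f"Simulated timeout for {url}")

    # requests.get / requests.request both route through Session.request.
    with patch("requests.Session.request", timed_out):
        yield
```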
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Dict = (UniPCMultistepScheduler,)
_UpperCAmelCase : str = (("num_inference_steps", 2_5),)
def A ( self : Dict , **lowercase : List[str] ):
'''simple docstring'''
_snake_case = {
'num_train_timesteps': 1_000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**lowercase )
return config
def A ( self : int , lowercase : int=0 , **lowercase : Optional[Any] ):
'''simple docstring'''
_snake_case = dict(self.forward_default_kwargs )
_snake_case = kwargs.pop('num_inference_steps' , lowercase )
_snake_case = self.dummy_sample
_snake_case = 0.1 * sample
_snake_case = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_snake_case = self.get_scheduler_config(**lowercase )
_snake_case = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# copy over dummy past residuals
_snake_case = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase )
_snake_case = scheduler_class.from_pretrained(lowercase )
new_scheduler.set_timesteps(lowercase )
# copy over dummy past residuals
_snake_case = dummy_past_residuals[: new_scheduler.config.solver_order]
_snake_case , _snake_case = sample, sample
for t in range(lowercase , time_step + scheduler.config.solver_order + 1 ):
_snake_case = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
_snake_case = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A ( self : Any , lowercase : Tuple=0 , **lowercase : List[str] ):
'''simple docstring'''
_snake_case = dict(self.forward_default_kwargs )
_snake_case = kwargs.pop('num_inference_steps' , lowercase )
_snake_case = self.dummy_sample
_snake_case = 0.1 * sample
_snake_case = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_snake_case = self.get_scheduler_config()
_snake_case = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# copy over dummy past residuals (must be after setting timesteps)
_snake_case = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase )
_snake_case = scheduler_class.from_pretrained(lowercase )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase )
# copy over dummy past residual (must be after setting timesteps)
_snake_case = dummy_past_residuals[: new_scheduler.config.solver_order]
_snake_case = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
_snake_case = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A ( self : int , lowercase : Optional[Any]=None , **lowercase : Optional[int] ):
'''simple docstring'''
if scheduler is None:
_snake_case = self.scheduler_classes[0]
_snake_case = self.get_scheduler_config(**lowercase )
_snake_case = scheduler_class(**lowercase )
_snake_case = self.scheduler_classes[0]
_snake_case = self.get_scheduler_config(**lowercase )
_snake_case = scheduler_class(**lowercase )
_snake_case = 10
_snake_case = self.dummy_model()
_snake_case = self.dummy_sample_deter
scheduler.set_timesteps(lowercase )
for i, t in enumerate(scheduler.timesteps ):
_snake_case = model(lowercase , lowercase )
_snake_case = scheduler.step(lowercase , lowercase , lowercase ).prev_sample
return sample
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = dict(self.forward_default_kwargs )
_snake_case = kwargs.pop('num_inference_steps' , lowercase )
for scheduler_class in self.scheduler_classes:
_snake_case = self.get_scheduler_config()
_snake_case = scheduler_class(**lowercase )
_snake_case = self.dummy_sample
_snake_case = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase , 'set_timesteps' ):
scheduler.set_timesteps(lowercase )
elif num_inference_steps is not None and not hasattr(lowercase , 'set_timesteps' ):
_snake_case = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_snake_case = [residual + 0.2, residual + 0.15, residual + 0.10]
_snake_case = dummy_past_residuals[: scheduler.config.solver_order]
_snake_case = scheduler.timesteps[5]
_snake_case = scheduler.timesteps[6]
_snake_case = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
_snake_case = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = UniPCMultistepScheduler(**self.get_scheduler_config() )
_snake_case = self.full_loop(scheduler=lowercase )
_snake_case = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
_snake_case = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_snake_case = DEISMultistepScheduler.from_config(scheduler.config )
_snake_case = DPMSolverMultistepScheduler.from_config(scheduler.config )
_snake_case = UniPCMultistepScheduler.from_config(scheduler.config )
_snake_case = self.full_loop(scheduler=lowercase )
_snake_case = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def A ( self : List[Any] ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=lowercase )
def A ( self : int ):
'''simple docstring'''
self.check_over_configs(thresholding=lowercase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowercase , prediction_type=lowercase , sample_max_value=lowercase , solver_order=lowercase , solver_type=lowercase , )
def A ( self : List[str] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase )
def A ( self : List[str] ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , )
_snake_case = self.full_loop(
solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , )
assert not torch.isnan(lowercase ).any(), "Samples have nan numbers"
def A ( self : str ):
'''simple docstring'''
self.check_over_configs(lower_order_final=lowercase )
self.check_over_configs(lower_order_final=lowercase )
def A ( self : str ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=lowercase , time_step=0 )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = self.full_loop()
_snake_case = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def A ( self : int ):
'''simple docstring'''
_snake_case = self.full_loop(prediction_type='v_prediction' )
_snake_case = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.1014 ) < 1E-3
def A ( self : Any ):
'''simple docstring'''
_snake_case = self.scheduler_classes[0]
_snake_case = self.get_scheduler_config(thresholding=lowercase , dynamic_thresholding_ratio=0 )
_snake_case = scheduler_class(**lowercase )
_snake_case = 10
_snake_case = self.dummy_model()
_snake_case = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowercase )
for i, t in enumerate(scheduler.timesteps ):
_snake_case = model(lowercase , lowercase )
_snake_case = scheduler.step(lowercase , lowercase , lowercase ).prev_sample
assert sample.dtype == torch.float16
def A ( self : List[str] , **lowercase : Union[str, Any] ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
_snake_case = self.get_scheduler_config(**lowercase )
_snake_case = scheduler_class(**lowercase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 686 |
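Outside the test harness, `UniPCMultistepScheduler` is typically swapped into a diffusers pipeline via `from_config`, which is exactly the round-trip the `full_loop` test above exercises. A hypothetical usage sketch (the checkpoint name is an example; any pipeline's scheduler config works the same way):

```python
# Drop UniPC into a pipeline for few-step sampling.
import torch
from diffusers import DiffusionPipeline, UniPCMultistepScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")
image = pipe("an astronaut riding a horse", num_inference_steps=20).images[0]
```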
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 686 | 1 |
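Continuing the docstring example above, the non-aggregated path returns per-example scores instead of bootstrap aggregates:

```python
# With use_aggregator=False, each rouge type maps to a list with one
# Score(precision, recall, fmeasure) tuple per prediction/reference pair.
results = rouge.compute(predictions=predictions, references=references, use_aggregator=False)
print(results["rouge1"])
```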
def count_divisors(n: int) -> int:
    # Trial-divide out each prime factor; the divisor count is the product
    # of (multiplicity + 1) over all prime factors.
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    # Find the first triangular number with more than 500 divisors.
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
| 686 |
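A few hand-verifiable spot checks for the divisor-count helper:

```python
# Sanity checks for count_divisors.
assert count_divisors(1) == 1
assert count_divisors(28) == 6              # divisors: 1, 2, 4, 7, 14, 28
assert count_divisors(2**5) == 6            # prime power: exponent + 1
assert count_divisors(2 * 3 * 5 * 7) == 16  # squarefree with 4 prime factors: 2**4
```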
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 686 | 1 |
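A hypothetical usage sketch: configs like this one are usually instantiated directly and handed to the matching model class (the `upscale=4` value is just an example, and this assumes a transformers version that ships Swin2SR):

```python
# Build a randomly initialized super-resolution model from a fresh config.
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution

config = Swin2SRConfig(upscale=4)
model = Swin2SRForImageSuperResolution(config)
```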
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowerCamelCase : List[str] = 16
_lowerCamelCase : List[Any] = 32
def a_ ( __lowercase : Accelerator , __lowercase : int = 16 ) -> int:
_snake_case = AutoTokenizer.from_pretrained('bert-base-cased' )
_snake_case = load_dataset('glue' , 'mrpc' )
def tokenize_function(__lowercase : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
_snake_case = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__lowercase , max_length=__lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_snake_case = datasets.map(
__lowercase , batched=__lowercase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_snake_case = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(__lowercase : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_snake_case = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_snake_case = 16
elif accelerator.mixed_precision != "no":
_snake_case = 8
else:
_snake_case = None
return tokenizer.pad(
__lowercase , padding='longest' , max_length=__lowercase , pad_to_multiple_of=__lowercase , return_tensors='pt' , )
# Instantiate dataloaders.
_snake_case = DataLoader(
tokenized_datasets['train'] , shuffle=__lowercase , collate_fn=__lowercase , batch_size=__lowercase )
_snake_case = DataLoader(
tokenized_datasets['validation'] , shuffle=__lowercase , collate_fn=__lowercase , batch_size=__lowercase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCamelCase : Tuple = mocked_dataloaders # noqa: F811
def a_ ( __lowercase : Optional[Any] , __lowercase : List[str] ) -> Optional[Any]:
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , __lowercase ) == "1":
_snake_case = 2
# New Code #
_snake_case = int(args.gradient_accumulation_steps )
_snake_case = int(args.local_sgd_steps )
# Initialize accelerator
_snake_case = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__lowercase )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_snake_case = config['lr']
_snake_case = int(config['num_epochs'] )
_snake_case = int(config['seed'] )
_snake_case = int(config['batch_size'] )
_snake_case = evaluate.load('glue' , 'mrpc' )
set_seed(__lowercase )
_snake_case , _snake_case = get_dataloaders(__lowercase , __lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_snake_case = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=__lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_snake_case = model.to(accelerator.device )
# Instantiate optimizer
_snake_case = AdamW(params=model.parameters() , lr=__lowercase )
# Instantiate scheduler
_snake_case = get_linear_schedule_with_warmup(
optimizer=__lowercase , num_warmup_steps=100 , num_training_steps=(len(__lowercase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
# Now we train the model
for epoch in range(__lowercase ):
model.train()
with LocalSGD(
accelerator=__lowercase , model=__lowercase , local_sgd_steps=__lowercase , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(__lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__lowercase ):
_snake_case = model(**__lowercase )
_snake_case = output.loss
accelerator.backward(__lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(__lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_snake_case = model(**__lowercase )
_snake_case = outputs.logits.argmax(dim=-1 )
_snake_case , _snake_case = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=__lowercase , references=__lowercase , )
_snake_case = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , __lowercase )
def a_ ( ) -> List[str]:
_snake_case = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=__lowercase , default=__lowercase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
# New Code #
parser.add_argument(
'--gradient_accumulation_steps' , type=__lowercase , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , )
parser.add_argument(
'--local_sgd_steps' , type=__lowercase , default=8 , help='Number of local SGD steps or None to disable local SGD' )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
_snake_case = parser.parse_args()
_snake_case = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(__lowercase , __lowercase )
if __name__ == "__main__":
main()
| 686 |
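The header comments above describe local SGD as synchronizing model parameters every K batches. Conceptually, the synchronization step is just a parameter average across the process group; a sketch of that core idea (not accelerate's `LocalSGD` implementation, and the function name is ours):

```python
# Each worker trains independently; every `local_sgd_steps` optimizer steps,
# parameters are averaged across all workers.
import torch.distributed as dist


def average_model_parameters(model) -> None:
    world_size = dist.get_world_size()
    for param in model.parameters():
        dist.all_reduce(param.data, op=dist.ReduceOp.SUM)
        param.data /= world_size
```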
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
| 686 | 1 |
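Because the pivot is chosen randomly, a property test against the built-in sort is a natural check (the test name is ours):

```python
import random


def test_quick_sort_random_matches_sorted() -> None:
    # Compare against sorted() on many random inputs, including the empty list.
    for _ in range(100):
        data = [random.randint(-1000, 1000) for _ in range(random.randint(0, 64))]
        expected = sorted(data)
        quick_sort_random(data, 0, len(data))
        assert data == expected
```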
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
_lowerCamelCase : List[str] = TypeVar('''KT''')
_lowerCamelCase : Optional[int] = TypeVar('''VT''')
class SCREAMING_SNAKE_CASE__ ( Generic[KT, VT] ):
'''simple docstring'''
def __init__( self : int , lowercase : KT | str = "root" , lowercase : VT | None = None ):
'''simple docstring'''
_snake_case = key
_snake_case = value
_snake_case = []
def __repr__( self : Any ):
'''simple docstring'''
return f'''Node({self.key}: {self.value})'''
@property
def A ( self : Tuple ):
'''simple docstring'''
return len(self.forward )
class SCREAMING_SNAKE_CASE__ ( Generic[KT, VT] ):
'''simple docstring'''
def __init__( self : Any , lowercase : float = 0.5 , lowercase : int = 16 ):
'''simple docstring'''
_snake_case = Node[KT, VT]()
_snake_case = 0
_snake_case = p
_snake_case = max_level
def __str__( self : Dict ):
'''simple docstring'''
_snake_case = list(self )
if len(lowercase ) == 0:
return f'''SkipList(level={self.level})'''
_snake_case = max((len(str(lowercase ) ) for item in items) , default=4 )
_snake_case = max(lowercase , 4 ) + 4
_snake_case = self.head
_snake_case = []
_snake_case = node.forward.copy()
lines.append(f'''[{node.key}]'''.ljust(lowercase , '-' ) + '* ' * len(lowercase ) )
lines.append(' ' * label_size + '| ' * len(lowercase ) )
while len(node.forward ) != 0:
_snake_case = node.forward[0]
lines.append(
f'''[{node.key}]'''.ljust(lowercase , '-' )
+ ' '.join(str(n.key ) if n.key == node.key else '|' for n in forwards ) )
lines.append(' ' * label_size + '| ' * len(lowercase ) )
_snake_case = node.forward
lines.append('None'.ljust(lowercase ) + '* ' * len(lowercase ) )
return f'''SkipList(level={self.level})\n''' + "\n".join(lowercase )
def __iter__( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
_snake_case = node.forward[0]
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def A ( self : Optional[int] , lowercase : int ):
'''simple docstring'''
_snake_case = []
_snake_case = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
_snake_case = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(lowercase )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def A ( self : str , lowercase : KT ):
'''simple docstring'''
_snake_case , _snake_case = self._locate_node(lowercase )
if node is not None:
for i, update_node in enumerate(lowercase ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
_snake_case = node.forward[i]
else:
_snake_case = update_node.forward[:i]
def A ( self : Any , lowercase : KT , lowercase : VT ):
'''simple docstring'''
_snake_case , _snake_case = self._locate_node(lowercase )
if node is not None:
_snake_case = value
else:
_snake_case = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , lowercase ):
update_vector.append(self.head )
_snake_case = level
_snake_case = Node(lowercase , lowercase )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(lowercase )
else:
_snake_case = new_node
def A ( self : List[str] , lowercase : VT ):
'''simple docstring'''
_snake_case , _snake_case = self._locate_node(lowercase )
if node is not None:
return node.value
return None
def a_ ( ) -> Any:
_snake_case = SkipList()
skip_list.insert('Key1' , 3 )
skip_list.insert('Key2' , 12 )
skip_list.insert('Key3' , 41 )
skip_list.insert('Key4' , -19 )
_snake_case = skip_list.head
_snake_case = {}
while node.level != 0:
_snake_case = node.forward[0]
_snake_case = node.value
assert len(__lowercase ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def a_ ( ) -> int:
_snake_case = SkipList()
skip_list.insert('Key1' , 10 )
skip_list.insert('Key1' , 12 )
skip_list.insert('Key5' , 7 )
skip_list.insert('Key7' , 10 )
skip_list.insert('Key10' , 5 )
skip_list.insert('Key7' , 7 )
skip_list.insert('Key5' , 5 )
skip_list.insert('Key10' , 10 )
_snake_case = skip_list.head
_snake_case = {}
while node.level != 0:
_snake_case = node.forward[0]
_snake_case = node.value
if len(__lowercase ) != 4:
print()
assert len(__lowercase ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def a_ ( ) -> List[str]:
_snake_case = SkipList()
assert skip_list.find('Some key' ) is None
def a_ ( ) -> Optional[Any]:
_snake_case = SkipList()
skip_list.insert('Key2' , 20 )
assert skip_list.find('Key2' ) == 20
skip_list.insert('Some Key' , 10 )
skip_list.insert('Key2' , 8 )
skip_list.insert('V' , 13 )
assert skip_list.find('Y' ) is None
assert skip_list.find('Key2' ) == 8
assert skip_list.find('Some Key' ) == 10
assert skip_list.find('V' ) == 13
def a_ ( ) -> str:
_snake_case = SkipList()
skip_list.delete('Some key' )
assert len(skip_list.head.forward ) == 0
def a_ ( ) -> Any:
_snake_case = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 14 )
skip_list.insert('Key2' , 15 )
skip_list.delete('V' )
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('Key2' ) is None
def a_ ( ) -> Any:
_snake_case = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 14 )
skip_list.insert('Key2' , 15 )
skip_list.delete('V' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) == 14
assert skip_list.find('Key1' ) == 12
assert skip_list.find('Key2' ) == 15
skip_list.delete('X' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) == 12
assert skip_list.find('Key2' ) == 15
skip_list.delete('Key1' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) == 15
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) is None
def a_ ( ) -> Optional[int]:
_snake_case = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 142 )
skip_list.insert('Key2' , 15 )
skip_list.delete('X' )
def traverse_keys(__lowercase : str ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(__lowercase )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def a_ ( ) -> str:
def is_sorted(__lowercase : Any ):
return all(next_item >= item for item, next_item in zip(__lowercase , lst[1:] ) )
_snake_case = SkipList()
for i in range(10 ):
skip_list.insert(__lowercase , __lowercase )
assert is_sorted(list(__lowercase ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(__lowercase ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(__lowercase ) )
def a_ ( ) -> List[Any]:
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def a_ ( ) -> Optional[int]:
_snake_case = SkipList()
skip_list.insert(2 , '2' )
skip_list.insert(4 , '4' )
skip_list.insert(6 , '4' )
skip_list.insert(4 , '5' )
skip_list.insert(8 , '4' )
skip_list.insert(9 , '4' )
skip_list.delete(4 )
print(__lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 686 |
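A compact usage check of the skip list, mirroring the calls the block's own test helpers make (`insert`, `find`, `delete`, and iteration, which yields keys in ascending order):

```python
# Minimal round-trip through the SkipList API exercised above.
sl = SkipList()
sl.insert("b", 2)
sl.insert("a", 1)
assert sl.find("a") == 1
assert list(sl) == ["a", "b"]  # iteration yields keys in sorted order
sl.delete("a")
assert sl.find("a") is None
```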
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    # Grow the number spiral until the share of primes on its diagonals
    # drops below the given ratio; return the resulting side length.
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 686 | 1 |
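Spot checks for the 6k ± 1 trial division, including composites that defeat naive tests:

```python
# Sanity checks for is_prime.
assert all(is_prime(p) for p in (2, 3, 5, 7, 97, 7919))
assert not any(is_prime(c) for c in (0, 1, 9, 91, 561))  # 91 = 7*13, 561 = 3*11*17
```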
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] , lowercase : int , lowercase : Optional[int]=7 , lowercase : List[str]=3 , lowercase : Any=30 , lowercase : Any=400 , lowercase : Any=True , lowercase : str=None , lowercase : Union[str, Any]=True , lowercase : List[str]=[0.5, 0.5, 0.5] , lowercase : int=[0.5, 0.5, 0.5] , lowercase : Union[str, Any]=True , lowercase : List[Any]=1 / 255 , lowercase : List[Any]=True , ):
'''simple docstring'''
_snake_case = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333}
_snake_case = parent
_snake_case = batch_size
_snake_case = num_channels
_snake_case = min_resolution
_snake_case = max_resolution
_snake_case = do_resize
_snake_case = size
_snake_case = do_normalize
_snake_case = image_mean
_snake_case = image_std
_snake_case = do_rescale
_snake_case = rescale_factor
_snake_case = do_pad
def A ( self : List[Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def A ( self : Optional[int] , lowercase : List[Any] , lowercase : List[str]=False ):
'''simple docstring'''
if not batched:
_snake_case = image_inputs[0]
if isinstance(lowercase , Image.Image ):
_snake_case , _snake_case = image.size
else:
_snake_case , _snake_case = image.shape[1], image.shape[2]
if w < h:
_snake_case = int(self.size['shortest_edge'] * h / w )
_snake_case = self.size['shortest_edge']
elif w > h:
_snake_case = self.size['shortest_edge']
_snake_case = int(self.size['shortest_edge'] * w / h )
else:
_snake_case = self.size['shortest_edge']
_snake_case = self.size['shortest_edge']
else:
_snake_case = []
for image in image_inputs:
_snake_case , _snake_case = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_snake_case = max(lowercase , key=lambda lowercase : item[0] )[0]
_snake_case = max(lowercase , key=lambda lowercase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = ConditionalDetrImageProcessor if is_vision_available() else None
def A ( self : Dict ):
'''simple docstring'''
_snake_case = ConditionalDetrImageProcessingTester(self )
@property
def A ( self : str ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : int ):
'''simple docstring'''
_snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , 'image_mean' ) )
self.assertTrue(hasattr(lowercase , 'image_std' ) )
self.assertTrue(hasattr(lowercase , 'do_normalize' ) )
self.assertTrue(hasattr(lowercase , 'do_resize' ) )
self.assertTrue(hasattr(lowercase , 'size' ) )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1_333} )
self.assertEqual(image_processor.do_pad , lowercase )
_snake_case = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , lowercase )
def A ( self : Any ):
'''simple docstring'''
pass
def A ( self : int ):
'''simple docstring'''
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
_snake_case = image_processing(lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case = image_processing(lowercase , return_tensors='pt' ).pixel_values
_snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case = image_processing(lowercase , return_tensors='pt' ).pixel_values
_snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def A ( self : int ):
'''simple docstring'''
_snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
_snake_case = json.loads(f.read() )
_snake_case = {'image_id': 39_769, 'annotations': target}
# encode them
_snake_case = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' )
_snake_case = image_processing(images=lowercase , annotations=lowercase , return_tensors='pt' )
# verify pixel values
_snake_case = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape , lowercase )
_snake_case = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase , atol=1E-4 ) )
# verify area
_snake_case = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase ) )
# verify boxes
_snake_case = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase )
_snake_case = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase , atol=1E-3 ) )
# verify image_id
_snake_case = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase ) )
# verify is_crowd
_snake_case = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase ) )
# verify class_labels
_snake_case = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase ) )
# verify orig_size
_snake_case = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase ) )
# verify size
_snake_case = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase ) )
@slow
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
_snake_case = json.loads(f.read() )
_snake_case = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target}
_snake_case = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_snake_case = ConditionalDetrImageProcessor(format='coco_panoptic' )
_snake_case = image_processing(images=lowercase , annotations=lowercase , masks_path=lowercase , return_tensors='pt' )
# verify pixel values
_snake_case = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape , lowercase )
_snake_case = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase , atol=1E-4 ) )
# verify area
_snake_case = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase ) )
# verify boxes
_snake_case = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase )
_snake_case = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase , atol=1E-3 ) )
# verify image_id
_snake_case = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase ) )
# verify is_crowd
_snake_case = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase ) )
# verify class_labels
_snake_case = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase ) )
# verify masks
_snake_case = 822_873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , lowercase )
# verify orig_size
_snake_case = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase ) )
# verify size
_snake_case = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase ) ) | 686 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = "resnet"
_UpperCAmelCase : Any = ["basic", "bottleneck"]
def __init__( self : Union[str, Any] , lowercase : Dict=3 , lowercase : Any=64 , lowercase : Any=[256, 512, 1_024, 2_048] , lowercase : Dict=[3, 4, 6, 3] , lowercase : Any="bottleneck" , lowercase : Optional[Any]="relu" , lowercase : Dict=False , lowercase : str=None , lowercase : Tuple=None , **lowercase : List[Any] , ):
'''simple docstring'''
super().__init__(**lowercase )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
_snake_case = num_channels
_snake_case = embedding_size
_snake_case = hidden_sizes
_snake_case = depths
_snake_case = layer_type
_snake_case = hidden_act
_snake_case = downsample_in_first_stage
_snake_case = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(lowercase ) + 1 )]
_snake_case , _snake_case = get_aligned_output_features_output_indices(
out_features=lowercase , out_indices=lowercase , stage_names=self.stage_names )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = version.parse("1.11" )
@property
def A ( self : int ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def A ( self : Optional[Any] ):
'''simple docstring'''
return 1E-3 | 686 | 1 |
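# A minimal usage sketch of the configuration above. The class names in this
# snippet are placeholders, so this assumes the public `transformers.ResNetConfig`
# API that the file mirrors:
from transformers import ResNetConfig

config = ResNetConfig(
    depths=[3, 4, 6, 3],                  # ResNet-50 stage depths
    hidden_sizes=[256, 512, 1024, 2048],
    layer_type='bottleneck',              # must be one of ["basic", "bottleneck"]
    out_features=['stage4'],              # aligned against stage_names by the backbone mixin
)
assert config.stage_names == ['stem', 'stage1', 'stage2', 'stage3', 'stage4']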
from collections import deque
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : str , lowercase : str , lowercase : int , lowercase : int ):
'''simple docstring'''
_snake_case = process_name # process name
_snake_case = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
_snake_case = arrival_time
_snake_case = burst_time # remaining burst time
        _snake_case = 0  # total time the process waits in the ready queue
_snake_case = 0 # time from arrival time to completion time
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Tuple , lowercase : int , lowercase : list[int] , lowercase : deque[Process] , lowercase : int , ):
'''simple docstring'''
_snake_case = number_of_queues
# time slice of queues that round robin algorithm applied
_snake_case = time_slices
# unfinished process is in this ready_queue
_snake_case = queue
# current time
_snake_case = current_time
# finished process is in this sequence queue
_snake_case = deque()
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def A ( self : Tuple , lowercase : list[Process] ):
'''simple docstring'''
_snake_case = []
for i in range(len(lowercase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def A ( self : int , lowercase : list[Process] ):
'''simple docstring'''
_snake_case = []
for i in range(len(lowercase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def A ( self : Union[str, Any] , lowercase : list[Process] ):
'''simple docstring'''
_snake_case = []
for i in range(len(lowercase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def A ( self : Dict , lowercase : deque[Process] ):
'''simple docstring'''
return [q.burst_time for q in queue]
def A ( self : Dict , lowercase : Process ):
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def A ( self : List[Any] , lowercase : deque[Process] ):
'''simple docstring'''
_snake_case = deque() # sequence deque of finished process
while len(lowercase ) != 0:
_snake_case = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of current process
self.update_waiting_time(lowercase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
_snake_case = 0
# set the process's turnaround time because it is finished
_snake_case = self.current_time - cp.arrival_time
# set the completion time
_snake_case = self.current_time
# add the process to queue that has finished queue
finished.append(lowercase )
self.finish_queue.extend(lowercase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def A ( self : Optional[Any] , lowercase : deque[Process] , lowercase : int ):
'''simple docstring'''
_snake_case = deque() # sequence deque of terminated process
        # run one cycle; unfinished processes go back into the queue
for _ in range(len(lowercase ) ):
_snake_case = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(lowercase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
_snake_case = self.current_time
                # put the process at the back of the queue because it is not finished
ready_queue.append(lowercase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
_snake_case = 0
# set the finish time
_snake_case = self.current_time
# update the process' turnaround time because it is finished
_snake_case = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(lowercase )
self.finish_queue.extend(lowercase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def A ( self : Optional[Any] ):
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
_snake_case , _snake_case = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
_lowerCamelCase : Union[str, Any] = Process('''P1''', 0, 53)
_lowerCamelCase : Optional[int] = Process('''P2''', 0, 17)
_lowerCamelCase : Optional[int] = Process('''P3''', 0, 68)
_lowerCamelCase : str = Process('''P4''', 0, 24)
_lowerCamelCase : List[str] = 3
_lowerCamelCase : Dict = [17, 25]
_lowerCamelCase : List[str] = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
_lowerCamelCase : Optional[Any] = Process('''P1''', 0, 53)
_lowerCamelCase : Tuple = Process('''P2''', 0, 17)
_lowerCamelCase : Any = Process('''P3''', 0, 68)
_lowerCamelCase : Any = Process('''P4''', 0, 24)
_lowerCamelCase : int = 3
_lowerCamelCase : Tuple = [17, 25]
_lowerCamelCase : Optional[Any] = deque([Pa, Pa, Pa, Pa])
_lowerCamelCase : Optional[Any] = MLFQ(number_of_queues, time_slices, queue, 0)
_lowerCamelCase : Any = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
    print(F'waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}')
    # print completion times of processes(P1, P2, P3, P4)
    print(F'completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}')
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(F'turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}')
    # print sequence of finished processes
    print(F'sequence of finished processes:{mlfq.calculate_sequence_of_finish_queue()}') | 686 |
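# A minimal usage sketch of the scheduler above, assuming the two classes are
# `Process` and `MLFQ` as the demo block implies. With time slices [17, 25]
# and bursts [53, 17, 68, 24], round-robin queue 1 finishes P2, queue 2
# finishes P4, and the final FCFS queue drains P1 and then P3:
from collections import deque

p1, p2, p3, p4 = Process('P1', 0, 53), Process('P2', 0, 17), Process('P3', 0, 68), Process('P4', 0, 24)
scheduler = MLFQ(3, [17, 25], deque([p1, p2, p3, p4]), 0)
scheduler.multi_level_feedback_queue()
print(scheduler.calculate_sequence_of_finish_queue())  # ['P2', 'P4', 'P1', 'P3']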
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : List[Any] , lowercase : Union[str, Any] , lowercase : int ):
'''simple docstring'''
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(lowercase ) for s in shape] )}.npy'''
def A ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
def A ( self : List[Any] , lowercase : Tuple=0 , lowercase : Optional[int]=(4, 4, 64, 64) , lowercase : Optional[int]=False ):
'''simple docstring'''
_snake_case = jnp.bfloataa if fpaa else jnp.floataa
_snake_case = jnp.array(load_hf_numpy(self.get_file_format(lowercase , lowercase ) ) , dtype=lowercase )
return image
def A ( self : Tuple , lowercase : Any=False , lowercase : Union[str, Any]="CompVis/stable-diffusion-v1-4" ):
'''simple docstring'''
_snake_case = jnp.bfloataa if fpaa else jnp.floataa
_snake_case = 'bf16' if fpaa else None
_snake_case , _snake_case = FlaxUNetaDConditionModel.from_pretrained(
lowercase , subfolder='unet' , dtype=lowercase , revision=lowercase )
return model, params
def A ( self : Union[str, Any] , lowercase : str=0 , lowercase : Optional[Any]=(4, 77, 768) , lowercase : int=False ):
'''simple docstring'''
_snake_case = jnp.bfloataa if fpaa else jnp.floataa
_snake_case = jnp.array(load_hf_numpy(self.get_file_format(lowercase , lowercase ) ) , dtype=lowercase )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def A ( self : Tuple , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : List[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=lowercase )
_snake_case = self.get_latents(lowercase , fpaa=lowercase )
_snake_case = self.get_encoder_hidden_states(lowercase , fpaa=lowercase )
_snake_case = model.apply(
{'params': params} , lowercase , jnp.array(lowercase , dtype=jnp.intaa ) , encoder_hidden_states=lowercase , ).sample
assert sample.shape == latents.shape
_snake_case = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_snake_case = jnp.array(lowercase , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowercase , lowercase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def A ( self : str , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : List[str] ):
'''simple docstring'''
_snake_case , _snake_case = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=lowercase )
_snake_case = self.get_latents(lowercase , shape=(4, 4, 96, 96) , fpaa=lowercase )
_snake_case = self.get_encoder_hidden_states(lowercase , shape=(4, 77, 1_024) , fpaa=lowercase )
_snake_case = model.apply(
{'params': params} , lowercase , jnp.array(lowercase , dtype=jnp.intaa ) , encoder_hidden_states=lowercase , ).sample
assert sample.shape == latents.shape
_snake_case = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_snake_case = jnp.array(lowercase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowercase , lowercase , atol=1E-2 ) | 686 | 1 |
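# A standalone sketch of what the tests above exercise, assuming the public
# diffusers Flax API (FlaxUNet2DConditionModel): load the Stable Diffusion
# v1.4 UNet and run one denoising step on dummy inputs whose shapes match the
# test fixtures (4x4x64x64 latents, 4x77x768 text embeddings).
import jax.numpy as jnp
from diffusers import FlaxUNet2DConditionModel

unet, unet_params = FlaxUNet2DConditionModel.from_pretrained(
    'CompVis/stable-diffusion-v1-4', subfolder='unet'
)
latents = jnp.zeros((4, 4, 64, 64), dtype=jnp.float32)
text_embeddings = jnp.zeros((4, 77, 768), dtype=jnp.float32)
noise_pred = unet.apply(
    {'params': unet_params},
    latents,
    jnp.array([50] * 4, dtype=jnp.int32),  # one timestep per batch element
    encoder_hidden_states=text_embeddings,
).sample
assert noise_pred.shape == latents.shape  # the UNet is shape-preserving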
import unittest
from knapsack import greedy_knapsack as kp
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = [10, 20, 30, 40, 50, 60]
_snake_case = [2, 4, 6, 8, 10, 12]
_snake_case = 100
self.assertEqual(kp.calc_profit(lowercase , lowercase , lowercase ) , 210 )
def A ( self : int ):
'''simple docstring'''
self.assertRaisesRegex(lowercase , 'max_weight must greater than zero.' )
def A ( self : str ):
'''simple docstring'''
self.assertRaisesRegex(lowercase , 'Weight can not be negative.' )
def A ( self : Union[str, Any] ):
'''simple docstring'''
self.assertRaisesRegex(lowercase , 'Profit can not be negative.' )
def A ( self : Union[str, Any] ):
'''simple docstring'''
self.assertRaisesRegex(lowercase , 'max_weight must greater than zero.' )
def A ( self : Optional[int] ):
'''simple docstring'''
self.assertRaisesRegex(
lowercase , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main() | 686 |
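# Why the first test expects 210: assuming `kp.calc_profit` implements the
# usual greedy-by-ratio strategy, every item here has the same profit/weight
# ratio (10/2 = ... = 60/12 = 5) and the total weight 2+4+6+8+10+12 = 42 fits
# under max_weight = 100, so the full profit 10+20+30+40+50+60 = 210 is taken:
profit = [10, 20, 30, 40, 50, 60]
weight = [2, 4, 6, 8, 10, 12]
assert all(p / w == 5 for p, w in zip(profit, weight))
assert sum(weight) == 42 and sum(profit) == 210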
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def a_ ( __lowercase : Any ) -> List[Any]:
_snake_case = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def a_ ( __lowercase : Dict ) -> Tuple:
_snake_case , _snake_case = emb.weight.shape
_snake_case = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
_snake_case = emb.weight.data
return lin_layer
def a_ ( __lowercase : Optional[int] , __lowercase : Union[str, Any]=None ) -> Tuple:
_snake_case = {}
for old_key in state_dict.keys():
_snake_case = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
_snake_case = key.replace('moe_layer.experts.0' , f'''ffn.experts.expert_{expert_idx}''' )
else:
_snake_case = key.replace('moe_layer.experts.' , 'ffn.experts.expert_' )
if "gate" in key:
_snake_case = key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier' )
if "fc2" and "experts" not in key:
_snake_case = key.replace('.fc2.' , '.ffn.fc2.' )
if "fc1" and "experts" not in key:
_snake_case = key.replace('.fc1.' , '.ffn.fc1.' )
if ".encoder_attn." in key:
_snake_case = key.replace('.encoder_attn.' , '.cross_attention.' )
if "encoder_attn_layer_norm" in key:
_snake_case = key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm' )
if "final_layer_norm" in key:
_snake_case = key.replace('final_layer_norm' , 'ff_layer_norm' )
_snake_case = state_dict[old_key]
return new_dict
def a_ ( __lowercase : Optional[Any] , __lowercase : Tuple , __lowercase : Any , __lowercase : List[str] , __lowercase : str = WEIGHTS_NAME ) -> Union[str, Any]:
_snake_case = []
_snake_case = 0
os.makedirs(__lowercase , exist_ok=__lowercase )
for expert in range(__lowercase ):
_snake_case = switch_checkpoint_path + f'''-rank-{expert}.pt'''
if os.path.isfile(__lowercase ):
_snake_case = torch.load(__lowercase )['model']
remove_ignore_keys_(__lowercase )
_snake_case = rename_fairseq_keys(__lowercase , __lowercase )
_snake_case = os.path.join(
__lowercase , weights_name.replace('.bin' , f'''-{len(__lowercase )+1:05d}-of-???.bin''' ) )
torch.save(__lowercase , __lowercase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__lowercase )[0]].dtype )
# Add the last block
_snake_case = os.path.join(__lowercase , weights_name.replace('.bin' , f'''-{len(__lowercase )+1:05d}-of-???.bin''' ) )
_snake_case = torch.load(switch_checkpoint_path + '-shared.pt' )['model']
remove_ignore_keys_(__lowercase )
_snake_case = rename_fairseq_keys(__lowercase , __lowercase )
_snake_case = shared_weights['decoder.embed_tokens.weight']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__lowercase ) == 1:
_snake_case = os.path.join(__lowercase , __lowercase )
torch.save(__lowercase , __lowercase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__lowercase , __lowercase )
# Otherwise, let's build the index
_snake_case = {}
for idx, shard in enumerate(__lowercase ):
_snake_case = weights_name.replace('.bin' , f'''-{idx+1:05d}-of-{len(__lowercase ):05d}.bin''' )
_snake_case = os.path.join(__lowercase , weights_name.replace('.bin' , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__lowercase , os.path.join(__lowercase , __lowercase ) )
for key in shard:
_snake_case = shard_file
# Add the metadata
_snake_case = {'total_size': total_size}
_snake_case = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(__lowercase , __lowercase ) , 'w' , encoding='utf-8' ) as f:
_snake_case = json.dumps(__lowercase , indent=2 , sort_keys=__lowercase ) + '\n'
f.write(__lowercase )
return metadata, index
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
_lowerCamelCase : List[str] = parser.parse_args()
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
_lowerCamelCase : Tuple = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
_lowerCamelCase : Dict = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path) | 686 | 1 |
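# A quick sanity sketch of the key renaming above, assuming the helper is
# named `rename_fairseq_keys` as the call sites suggest (the keys below are
# illustrative, not taken from a real checkpoint):
sample_state_dict = {
    'decoder.layers.3.moe_layer.experts.0.fc1.weight': 0,
    'decoder.layers.3.moe_layer.gate.wg.weight': 0,
    'decoder.layers.2.encoder_attn.k_proj.weight': 0,
}
renamed = rename_fairseq_keys(sample_state_dict, 7)
assert 'decoder.layers.3.ffn.experts.expert_7.fc1.weight' in renamed
assert 'decoder.layers.3.ffn.router.classifier.weight' in renamed
assert 'decoder.layers.2.cross_attention.k_proj.weight' in renamed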
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : List[str] , lowercase : Dict=13 , lowercase : Optional[int]=10 , lowercase : List[Any]=3 , lowercase : Tuple=2 , lowercase : Union[str, Any]=2 , lowercase : int=True , lowercase : List[str]=True , lowercase : List[Any]=32 , lowercase : Optional[int]=5 , lowercase : str=4 , lowercase : Optional[int]=37 , lowercase : Optional[int]="gelu" , lowercase : Optional[int]=0.1 , lowercase : List[Any]=0.1 , lowercase : Any=10 , lowercase : List[Any]=0.02 , lowercase : Union[str, Any]="divided_space_time" , lowercase : List[Any]=None , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = num_channels
_snake_case = patch_size
_snake_case = num_frames
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = attention_type
_snake_case = initializer_range
_snake_case = scope
_snake_case = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
_snake_case = (image_size // patch_size) ** 2
_snake_case = (num_frames) * self.num_patches_per_frame + 1
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.num_labels )
_snake_case = self.get_config()
return config, pixel_values, labels
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
_snake_case = self.num_labels
return config
def A ( self : Dict , lowercase : Dict , lowercase : List[Any] , lowercase : List[str] ):
'''simple docstring'''
_snake_case = TimesformerModel(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Optional[int] , lowercase : str , lowercase : int , lowercase : int ):
'''simple docstring'''
_snake_case = TimesformerForVideoClassification(lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase )
# verify the logits shape
_snake_case = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : str = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
_UpperCAmelCase : Any = (
{"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase : Union[str, Any] = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Optional[int] = False
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = TimesformerModelTester(self )
_snake_case = ConfigTester(
self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )
def A ( self : int , lowercase : Optional[Any] , lowercase : int , lowercase : Optional[Any]=False ):
'''simple docstring'''
_snake_case = copy.deepcopy(lowercase )
if return_labels:
if model_class in get_values(lowercase ):
_snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
def A ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='TimeSformer does not use inputs_embeds' )
def A ( self : Optional[int] ):
'''simple docstring'''
pass
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowercase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase )
def A ( self : Any ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowercase )
@slow
def A ( self : List[Any] ):
'''simple docstring'''
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = TimesformerModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def A ( self : List[Any] ):
'''simple docstring'''
if not self.has_attentions:
pass
else:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = True
for model_class in self.all_model_classes:
_snake_case = self.model_tester.seq_length
_snake_case = self.model_tester.num_frames
_snake_case = True
_snake_case = False
_snake_case = True
_snake_case = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowercase , lowercase ) )
_snake_case = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowercase , lowercase ) )
_snake_case = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
_snake_case = len(lowercase )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(out_len + 1 , len(lowercase ) )
_snake_case = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def A ( self : List[str] ):
'''simple docstring'''
def check_hidden_states_output(lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Optional[int] ):
_snake_case = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowercase , lowercase ) )
_snake_case = outputs.hidden_states
_snake_case = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase ) , lowercase )
_snake_case = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def a_ ( ) -> Union[str, Any]:
_snake_case = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
_snake_case = np.load(__lowercase )
return list(__lowercase )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A ( self : Optional[int] ):
'''simple docstring'''
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def A ( self : int ):
'''simple docstring'''
_snake_case = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400' ).to(
lowercase )
_snake_case = self.default_image_processor
_snake_case = prepare_video()
_snake_case = image_processor(video[:8] , return_tensors='pt' ).to(lowercase )
# forward pass
with torch.no_grad():
_snake_case = model(**lowercase )
# verify the logits
_snake_case = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowercase )
_snake_case = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) ) | 686 |
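# The sequence length used throughout the tester above follows the ViT-style
# patching arithmetic: (image_size // patch_size) ** 2 patches per frame,
# times num_frames, plus one CLS token. With the tester defaults
# (image_size=10, patch_size=2, num_frames=2):
image_size, patch_size, num_frames = 10, 2, 2
num_patches_per_frame = (image_size // patch_size) ** 2  # 25
seq_length = num_frames * num_patches_per_frame + 1      # 51
assert (num_patches_per_frame, seq_length) == (25, 51)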
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_lowerCamelCase : List[Any] = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_lowerCamelCase : Any = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_lowerCamelCase : Union[str, Any] = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each prediction is a label id (or a float score for the STS-B regression task).
    references: list of references, one per prediction.
        Each reference is a label id (or a float score for STS-B).
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def a_ ( __lowercase : List[Any] , __lowercase : Any ) -> Union[str, Any]:
return float((preds == labels).mean() )
def a_ ( __lowercase : Optional[Any] , __lowercase : List[str] ) -> Dict:
_snake_case = simple_accuracy(__lowercase , __lowercase )
_snake_case = float(fa_score(y_true=__lowercase , y_pred=__lowercase ) )
return {
"accuracy": acc,
"f1": fa,
}
def a_ ( __lowercase : int , __lowercase : str ) -> str:
_snake_case = float(pearsonr(__lowercase , __lowercase )[0] )
_snake_case = float(spearmanr(__lowercase , __lowercase )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
def A ( self : List[Any] , lowercase : List[str] , lowercase : Optional[Any] ):
'''simple docstring'''
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(lowercase , lowercase )}
elif self.config_name == "stsb":
return pearson_and_spearman(lowercase , lowercase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(lowercase , lowercase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(lowercase , lowercase )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' ) | 686 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = {'''vocab_file''': '''vocab.txt'''}
_lowerCamelCase : Tuple = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
_lowerCamelCase : Optional[int] = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
_lowerCamelCase : List[Any] = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : int = VOCAB_FILES_NAMES
_UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : str = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Optional[Any] = ConvBertTokenizer
def __init__( self : List[str] , lowercase : Optional[Any]=None , lowercase : List[Any]=None , lowercase : Optional[Any]=True , lowercase : Optional[int]="[UNK]" , lowercase : str="[SEP]" , lowercase : Union[str, Any]="[PAD]" , lowercase : Union[str, Any]="[CLS]" , lowercase : Tuple="[MASK]" , lowercase : List[str]=True , lowercase : str=None , **lowercase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
_snake_case = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , lowercase ) != do_lower_case
or normalizer_state.get('strip_accents' , lowercase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , lowercase ) != tokenize_chinese_chars
):
_snake_case = getattr(lowercase , normalizer_state.pop('type' ) )
_snake_case = do_lower_case
_snake_case = strip_accents
_snake_case = tokenize_chinese_chars
_snake_case = normalizer_class(**lowercase )
_snake_case = do_lower_case
def A ( self : List[str] , lowercase : Tuple , lowercase : List[str]=None ):
'''simple docstring'''
_snake_case = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A ( self : Optional[int] , lowercase : List[int] , lowercase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A ( self : Union[str, Any] , lowercase : str , lowercase : Optional[str] = None ):
'''simple docstring'''
_snake_case = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase ) | 686 |
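# A minimal sketch of the BERT-style pair layout the two methods above
# produce, using the real checkpoint from the vocab map (the exact token ids
# depend on the vocab; only the special-token structure is asserted):
from transformers import ConvBertTokenizerFast

tokenizer = ConvBertTokenizerFast.from_pretrained('YituTech/conv-bert-base')
encoded = tokenizer('hello world', 'how are you')
# Layout: [CLS] A ... [SEP] B ... [SEP]; token_type_ids are 0 for the first
# segment (including [CLS] and the first [SEP]) and 1 for the second.
assert encoded['token_type_ids'][0] == 0 and encoded['token_type_ids'][-1] == 1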
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
_lowerCamelCase : Dict = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : int = "sequence-classification"
def __init__( self : Optional[int] , lowercase : Any ):
'''simple docstring'''
if type(lowercase ) == dict:
_snake_case = Namespace(**lowercase )
_snake_case = glue_output_modes[hparams.task]
_snake_case = glue_tasks_num_labels[hparams.task]
super().__init__(lowercase , lowercase , self.mode )
def A ( self : Optional[Any] , **lowercase : Optional[Any] ):
'''simple docstring'''
return self.model(**lowercase )
def A ( self : Optional[Any] , lowercase : str , lowercase : Tuple ):
'''simple docstring'''
_snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_snake_case = self(**lowercase )
_snake_case = outputs[0]
_snake_case = self.trainer.lr_schedulers[0]['scheduler']
_snake_case = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.hparams
_snake_case = processors[args.task]()
_snake_case = processor.get_labels()
for mode in ["train", "dev"]:
_snake_case = self._feature_file(lowercase )
if os.path.exists(lowercase ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , lowercase )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_snake_case = (
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_snake_case = convert_examples_to_features(
lowercase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , lowercase )
torch.save(lowercase , lowercase )
def A ( self : Dict , lowercase : str , lowercase : int , lowercase : bool = False ):
'''simple docstring'''
_snake_case = 'dev' if mode == 'test' else mode
_snake_case = self._feature_file(lowercase )
logger.info('Loading features from cached file %s' , lowercase )
_snake_case = torch.load(lowercase )
_snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_snake_case = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_snake_case = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(lowercase , lowercase , lowercase , lowercase ) , batch_size=lowercase , shuffle=lowercase , )
def A ( self : str , lowercase : Optional[Any] , lowercase : str ):
'''simple docstring'''
_snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_snake_case = self(**lowercase )
_snake_case , _snake_case = outputs[:2]
_snake_case = logits.detach().cpu().numpy()
_snake_case = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : int , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_snake_case = np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_snake_case = np.argmax(lowercase , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_snake_case = np.squeeze(lowercase )
_snake_case = np.concatenate([x['target'] for x in outputs] , axis=0 )
_snake_case = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , lowercase , lowercase )}
_snake_case = dict(results.items() )
_snake_case = results
return ret, preds_list, out_label_list
def A ( self : int , lowercase : list ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case = self._eval_end(lowercase )
_snake_case = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : List[str] , lowercase : Any ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case = self._eval_end(lowercase )
_snake_case = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def A ( lowercase : Tuple , lowercase : Any ):
'''simple docstring'''
BaseTransformer.add_model_specific_args(lowercase , lowercase )
parser.add_argument(
'--max_seq_length' , default=128 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=lowercase , required=lowercase , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=lowercase , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def a_ ( ) -> Union[str, Any]:
_snake_case = argparse.ArgumentParser()
add_generic_args(__lowercase , os.getcwd() )
_snake_case = GLUETransformer.add_model_specific_args(__lowercase , os.getcwd() )
_snake_case = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_snake_case = os.path.join(
'./results' , f'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
_snake_case = GLUETransformer(__lowercase )
_snake_case = generic_train(__lowercase , __lowercase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_snake_case = sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=__lowercase ) )
_snake_case = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__lowercase )
if __name__ == "__main__":
main() | 686 | 1 |
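# A sketch of how this script is typically invoked. The exact flag set beyond
# the arguments registered above depends on `lightning_base`'s
# `add_generic_args`, so the names below (script name included) are
# illustrative:
#
#   python run_pl_glue.py \
#       --model_name_or_path bert-base-cased \
#       --task mrpc \
#       --data_dir ./glue_data/MRPC \
#       --max_seq_length 128 \
#       --gpus 1 \
#       --do_predict \
#       --output_dir ./results/mrpc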
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_UpperCAmelCase : int = BlenderbotConfig
_UpperCAmelCase : Optional[Any] = {}
_UpperCAmelCase : Union[str, Any] = "gelu"
def __init__( self : Dict , lowercase : int , lowercase : str=13 , lowercase : Optional[int]=7 , lowercase : Union[str, Any]=True , lowercase : str=False , lowercase : str=99 , lowercase : str=32 , lowercase : int=2 , lowercase : Optional[Any]=4 , lowercase : int=37 , lowercase : Any=0.1 , lowercase : List[str]=0.1 , lowercase : Union[str, Any]=20 , lowercase : List[Any]=2 , lowercase : Tuple=1 , lowercase : List[Any]=0 , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = eos_token_id
_snake_case = pad_token_id
_snake_case = bos_token_id
def A ( self : Dict ):
'''simple docstring'''
_snake_case = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_snake_case = prepare_blenderbot_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def A ( self : Union[str, Any] , lowercase : Optional[int] , lowercase : List[str] ):
'''simple docstring'''
_snake_case = TFBlenderbotModel(config=lowercase ).get_decoder()
_snake_case = inputs_dict['input_ids']
_snake_case = input_ids[:1, :]
_snake_case = inputs_dict['attention_mask'][:1, :]
_snake_case = inputs_dict['head_mask']
_snake_case = 1
# first forward pass
_snake_case = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase )
_snake_case , _snake_case = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_snake_case = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case = model(lowercase , attention_mask=lowercase )[0]
_snake_case = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case = output_from_no_past[:, -3:, random_slice_idx]
_snake_case = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
def a_ ( __lowercase : Union[str, Any] , __lowercase : Any , __lowercase : List[str] , __lowercase : Optional[Any]=None , __lowercase : Tuple=None , __lowercase : int=None , __lowercase : Optional[int]=None , __lowercase : int=None , ) -> List[Any]:
if attention_mask is None:
_snake_case = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_snake_case = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_snake_case = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_snake_case = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_snake_case = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : int = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
_UpperCAmelCase : int = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
_UpperCAmelCase : List[Any] = (
{
"conversational": TFBlenderbotForConditionalGeneration,
"feature-extraction": TFBlenderbotModel,
"summarization": TFBlenderbotForConditionalGeneration,
"text2text-generation": TFBlenderbotForConditionalGeneration,
"translation": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
_UpperCAmelCase : Dict = True
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : List[Any] = False
def A ( self : str ):
'''simple docstring'''
_snake_case = TFBlenderbotModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase )
def A ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
@require_tokenizers
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : int = ["My friends are cool but they eat too many carbs."]
_UpperCAmelCase : int = "facebook/blenderbot-400M-distill"
@cached_property
def A ( self : Optional[Any] ):
'''simple docstring'''
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def A ( self : str ):
'''simple docstring'''
_snake_case = self.tokenizer(self.src_text , return_tensors='tf' )
_snake_case = self.model.generate(
model_inputs.input_ids , )
_snake_case = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
) | 686 |
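# A standalone sketch of the integration test's happy path, assuming the
# public transformers TF API:
from transformers import BlenderbotTokenizer, TFBlenderbotForConditionalGeneration

tokenizer = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-400M-distill')
model = TFBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-400M-distill')
inputs = tokenizer(['My friends are cool but they eat too many carbs.'], return_tensors='tf')
reply_ids = model.generate(inputs.input_ids)
print(tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])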
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = LEDConfig
_UpperCAmelCase : int = {}
_UpperCAmelCase : List[str] = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: head masking is not yet implemented for LED
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_lm_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
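# ---------------------------------------------------------------------------
# Illustrative sketch (separate from the tests above): LED marks a handful of
# leading positions as "global" so they attend to, and are attended by, every
# other token; the tests build such a mask with tf.where. A minimal hand-rolled
# equivalent for a single sequence, with an arbitrary token count for the demo:
import tensorflow as tf

demo_seq_length = 8
demo_num_global_tokens = 2  # e.g. <s> plus one task token; the count is made up

demo_global_attention_mask = tf.cast(tf.range(demo_seq_length) < demo_num_global_tokens, tf.int32)[None, :]
print(demo_global_attention_mask.numpy())  # [[1 1 0 0 0 0 0 0]]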
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
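# ---------------------------------------------------------------------------
# Minimal sketch of how an offline() helper like the one imported above can be
# built; it assumes monkeypatching requests.Session.request is acceptable. The
# real datasets implementation handles several simulation modes and differs in
# detail.
from contextlib import contextmanager
from unittest.mock import patch


@contextmanager
def fake_connection_error():
    def raise_connection_error(self, *args, **kwargs):
        raise requests.exceptions.ConnectionError("Offline mode is enabled.")

    with patch("requests.Session.request", raise_connection_error):
        yield


# Usage:
# with fake_connection_error():
#     requests.get("https://huggingface.co")  # raises ConnectionError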
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"

stages = [ZERO2, ZERO3]


def parameterized_custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage,
            model=model,
            distributed=False,
            fp16=False,
        )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage,
            model=model,
            distributed=True,
            fp16=False,
        )

    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage,
            model=model,
            distributed=False,
            fp16=True,
        )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(
            stage=stage,
            model=model,
            distributed=True,
            fp16=True,
        )
    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(
        self,
        stage: str,
        model: str,
        eval_steps: int = 10,
        distributed: bool = True,
        fp16: bool = True,
        quality_checks: bool = True,
    ):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )

        self.do_checks(output_dir)

        return output_dir

    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
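# ---------------------------------------------------------------------------
# Illustrative sketch of how the pieces above compose into a single command
# line; the paths and config name are placeholders, not files from the repo.
launcher = "deepspeed --num_nodes 1 --num_gpus 2".split()
script = ["examples/research_projects/wav2vec2/run_asr.py"]  # placeholder path
train_args = "--output_dir /tmp/out --num_train_epochs 1 --fp16".split()
ds_args = "--deepspeed ds_config_wav2vec2_zero2.json".split()  # placeholder config

cmd = launcher + script + train_args + ds_args
print(" ".join(cmd))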
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )
    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10_000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10_000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )
    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )
    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
    main(parser.parse_args())
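

# ---------------------------------------------------------------------------
# Usage sketch (appended for illustration, not part of the original script):
# the helpers above expect a whitespace-separated edge list, "node node weight"
# per line, whose first character is the start node. The toy instance below is
# made up; call _toy_run() manually to try it.
def _toy_run():
    import tempfile

    toy_edges = "a b 4\na c 3\nb c 2\nb d 5\nc d 1\nd a 6\n"
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        tmp.write(toy_edges)
        path = tmp.name

    neighbours = generate_neighbours(path)
    first_solution, distance = generate_first_solution(path, neighbours)
    best_sol, best_cost = tabu_search(first_solution, distance, neighbours, 5, 3)
    print(f"Toy run -> {best_sol} with total distance {best_cost}")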
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
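# ---------------------------------------------------------------------------
# Minimal sketch of the same lazy-import idea without the _LazyModule helper,
# using the module-level __getattr__ hook from PEP 562; illustrative only (the
# mapping covers just two names, and the function would have to be named
# __getattr__ in a real module to take effect).
import importlib


def _lazy_getattr(name):
    _lazy_targets = {"ErnieConfig": ".configuration_ernie", "ErnieModel": ".modeling_ernie"}
    if name in _lazy_targets:
        module = importlib.import_module(_lazy_targets[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")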
def matching_min_vertex_cover(graph: dict) -> set:
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}") | 686 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1_000):
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1

    # n - 1 = d*(2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
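

# ---------------------------------------------------------------------------
# Sanity-check sketch: the probabilistic Miller-Rabin test should agree with
# naive trial division on small inputs; prec is lowered here only to keep the
# check fast.
def _is_prime_naive(m):
    if m < 2:
        return False
    return all(m % k for k in range(2, int(m**0.5) + 1))


assert all(is_prime_big(m, prec=20) == _is_prime_naive(m) for m in range(2, 200))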
import math
def res(x, y):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")
if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
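

# ---------------------------------------------------------------------------
# Worked example: comparing 2**10 with 3**7 through their base-10 logarithms.
# res(2, 10) = 10*log10(2) ~= 3.0103 and res(3, 7) = 7*log10(3) ~= 3.3398,
# so 3**7 is the larger value, matching the direct computation (1024 vs 2187).
assert res(3, 7) > res(2, 10)
assert 3**7 > 2**10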
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculates mean and max line length of file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculates alphanumeric fraction of file content."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if current hash is still in set of unique hashes and remove if true."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking at the first `scan_width` lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration or test file by looking for keywords in the
    first few lines, then by counting occurrences of 'config' and 'test'."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if file uses symbol '=' less than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter dataset with the heuristics computed above."""
    if not check_uniques(example, uniques):
        return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file(file_path):
    """Compress a file with g-zip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
_lowerCamelCase : Dict = HfArgumentParser(PreprocessingArguments)
_lowerCamelCase : Dict = parser.parse_args()
if args.num_workers is None:
_lowerCamelCase : int = multiprocessing.cpu_count()
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
_lowerCamelCase : Any = time.time()
_lowerCamelCase : Optional[Any] = load_dataset(args.dataset_name, split='''train''')
print(F'Time to load dataset: {time.time()-t_start:.2f}')
# Run preprocessing
_lowerCamelCase : Optional[int] = time.time()
_lowerCamelCase : Union[str, Any] = ds.map(preprocess, num_proc=args.num_workers)
print(F'Time to preprocess dataset: {time.time()-t_start:.2f}')
# Deduplicate hashes
_lowerCamelCase : List[Any] = set(ds.unique('''hash'''))
_lowerCamelCase : Dict = len(uniques) / len(ds)
print(F'Fraction of duplicates: {1-frac:.2%}')
# Deduplicate data and apply heuristics
_lowerCamelCase : List[Any] = time.time()
_lowerCamelCase : Optional[int] = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F'Time to filter dataset: {time.time()-t_start:.2f}')
print(F'Size of filtered dataset: {len(ds_filter)}')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
_lowerCamelCase : Union[str, Any] = time.time()
_lowerCamelCase , _lowerCamelCase : Dict = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'Time to deduplicate dataset: {time.time()-t_start:.2f}')
print(F'Size of deduplicate dataset: {len(ds_filter)}')
# Save data in batches of samples_per_file
_lowerCamelCase : Optional[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
_lowerCamelCase : int = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
_lowerCamelCase : Union[str, Any] = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
_lowerCamelCase : Dict = str(data_dir / F'file-{file_number+1:012}.json')
_lowerCamelCase : str = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'Time to save dataset: {time.time()-t_start:.2f}') | 686 | 1 |
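
# ---------------------------------------------------------------------------
# Illustrative sketch of the near-deduplication idea used above, reduced to
# plain token-set Jaccard similarity; the real pipeline uses MinHash so that
# pairs never have to be compared exhaustively.
def _jaccard(a: str, b: str) -> float:
    tokens_a, tokens_b = set(a.split()), set(b.split())
    union = tokens_a | tokens_b
    return len(tokens_a & tokens_b) / len(union) if union else 0.0


assert _jaccard("def f(x): return x", "def f(x): return x + 1") > 0.5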
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
# Initialize accelerator
if args.with_tracking:
_snake_case = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
_snake_case = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_snake_case = config['lr']
_snake_case = int(config['num_epochs'] )
_snake_case = int(config['seed'] )
_snake_case = int(config['batch_size'] )
_snake_case = config['image_size']
if not isinstance(__lowercase , (list, tuple) ):
_snake_case = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
_snake_case = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
_snake_case = int(args.checkpointing_steps )
else:
raise ValueError(
f'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
_snake_case = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
_snake_case = os.path.split(__lowercase )[-1].split('.' )[0]
accelerator.init_trackers(__lowercase , __lowercase )
# Grab all the image filenames
_snake_case = [os.path.join(args.data_dir , __lowercase ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
_snake_case = [extract_label(__lowercase ) for fname in file_names]
_snake_case = list(set(__lowercase ) )
id_to_label.sort()
_snake_case = {lbl: i for i, lbl in enumerate(__lowercase )}
# Set the seed before splitting the data.
np.random.seed(__lowercase )
torch.manual_seed(__lowercase )
torch.cuda.manual_seed_all(__lowercase )
# Split our filenames between train and validation
_snake_case = np.random.permutation(len(__lowercase ) )
_snake_case = int(0.8 * len(__lowercase ) )
_snake_case = random_perm[:cut]
_snake_case = random_perm[cut:]
# For training we use a simple RandomResizedCrop
_snake_case = Compose([RandomResizedCrop(__lowercase , scale=(0.5, 1.0) ), ToTensor()] )
_snake_case = PetsDataset(
[file_names[i] for i in train_split] , image_transform=__lowercase , label_to_id=__lowercase )
# For evaluation, we use a deterministic Resize
_snake_case = Compose([Resize(__lowercase ), ToTensor()] )
_snake_case = PetsDataset([file_names[i] for i in eval_split] , image_transform=__lowercase , label_to_id=__lowercase )
# Instantiate dataloaders.
_snake_case = DataLoader(__lowercase , shuffle=__lowercase , batch_size=__lowercase , num_workers=4 )
_snake_case = DataLoader(__lowercase , shuffle=__lowercase , batch_size=__lowercase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_snake_case = create_model('resnet50d' , pretrained=__lowercase , num_classes=len(__lowercase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_snake_case = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
_snake_case = False
for param in model.get_classifier().parameters():
_snake_case = True
# We normalize the batches of images to be a bit faster.
_snake_case = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
_snake_case = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
_snake_case = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
_snake_case = OneCycleLR(optimizer=__lowercase , max_lr=__lowercase , epochs=__lowercase , steps_per_epoch=len(__lowercase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
# We need to keep track of how many total steps we have iterated over
_snake_case = 0
# We also need to keep track of the starting epoch so files are named properly
_snake_case = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
accelerator.load_state(args.resume_from_checkpoint )
_snake_case = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
_snake_case = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
_snake_case = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
_snake_case = os.path.splitext(__lowercase )[0]
if "epoch" in training_difference:
_snake_case = int(training_difference.replace('epoch_' , '' ) ) + 1
_snake_case = None
else:
_snake_case = int(training_difference.replace('step_' , '' ) )
_snake_case = resume_step // len(__lowercase )
resume_step -= starting_epoch * len(__lowercase )
# Now we train the model
for epoch in range(__lowercase , __lowercase ):
model.train()
if args.with_tracking:
_snake_case = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
_snake_case = accelerator.skip_first_batches(__lowercase , __lowercase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
_snake_case = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
_snake_case = {k: v.to(accelerator.device ) for k, v in batch.items()}
_snake_case = (batch['image'] - mean) / std
_snake_case = model(__lowercase )
_snake_case = torch.nn.functional.cross_entropy(__lowercase , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(__lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(__lowercase , __lowercase ):
_snake_case = f'''step_{overall_step}'''
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
_snake_case = os.path.join(args.output_dir , __lowercase )
accelerator.save_state(__lowercase )
model.eval()
_snake_case = 0
_snake_case = 0
for step, batch in enumerate(__lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
_snake_case = {k: v.to(accelerator.device ) for k, v in batch.items()}
_snake_case = (batch['image'] - mean) / std
with torch.no_grad():
_snake_case = model(__lowercase )
_snake_case = outputs.argmax(dim=-1 )
_snake_case , _snake_case = accelerator.gather_for_metrics((predictions, batch['label']) )
_snake_case = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
_snake_case = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}: {100 * eval_metric:.2f}''' )
if args.with_tracking:
accelerator.log(
{
'accuracy': 100 * eval_metric,
'train_loss': total_loss.item() / len(__lowercase ),
'epoch': epoch,
} , step=__lowercase , )
if checkpointing_steps == "epoch":
_snake_case = f'''epoch_{epoch}'''
if args.output_dir is not None:
_snake_case = os.path.join(args.output_dir , __lowercase )
accelerator.save_state(__lowercase )
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
    main()
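

# ---------------------------------------------------------------------------
# Minimal runnable sketch of the accelerate pattern used above, with a toy
# linear model and random batches standing in for the real pipeline; note that
# accelerator.prepare passes unrecognized objects (like this plain list of
# batches) through unchanged.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
toy_model = torch.nn.Linear(4, 2)
toy_optimizer = torch.optim.SGD(toy_model.parameters(), lr=0.1)
toy_batches = [(torch.randn(8, 4), torch.randint(0, 2, (8,))) for _ in range(3)]

toy_model, toy_optimizer, toy_batches = accelerator.prepare(toy_model, toy_optimizer, toy_batches)
for inputs, labels in toy_batches:
    inputs, labels = inputs.to(accelerator.device), labels.to(accelerator.device)
    loss = torch.nn.functional.cross_entropy(toy_model(inputs), labels)
    accelerator.backward(loss)  # replaces loss.backward() so mixed precision/DDP keep working
    toy_optimizer.step()
    toy_optimizer.zero_grad()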
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : int = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
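

# ---------------------------------------------------------------------------
# Back-of-the-envelope sketch (illustrative): with the defaults above, the
# encoder sequence length is the number of image patches plus the learned
# detection tokens.
demo_height, demo_width = 512, 864
demo_patch = 16
demo_num_patches = (demo_height // demo_patch) * (demo_width // demo_patch)  # 32 * 54 = 1728
demo_num_detection_tokens = 100
print(demo_num_patches + demo_num_detection_tokens)  # 1828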
import os
def a_ ( __lowercase : str = "matrix.txt" ) -> int:
with open(os.path.join(os.path.dirname(__lowercase ) , __lowercase ) ) as in_file:
_snake_case = in_file.read()
_snake_case = [[int(__lowercase ) for cell in row.split(',' )] for row in data.strip().splitlines()]
_snake_case = [[0 for cell in row] for row in grid]
_snake_case = len(grid[0] )
_snake_case = [[0 for i in range(__lowercase )] for j in range(__lowercase )]
_snake_case = grid[0][0]
for i in range(1 , __lowercase ):
_snake_case = grid[0][i] + dp[0][i - 1]
for i in range(1 , __lowercase ):
_snake_case = grid[i][0] + dp[i - 1][0]
for i in range(1 , __lowercase ):
for j in range(1 , __lowercase ):
_snake_case = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(F'{solution() = }') | 686 |
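

# ---------------------------------------------------------------------------
# Space-optimized sketch of the same DP: only the previous row is ever read,
# so the full table can be collapsed into a single rolling row.
def min_path_sum_rolling(grid):
    row = list(grid[0])
    for j in range(1, len(row)):
        row[j] += row[j - 1]
    for i in range(1, len(grid)):
        row[0] += grid[i][0]
        for j in range(1, len(row)):
            row[j] = grid[i][j] + min(row[j], row[j - 1])
    return row[-1]


assert min_path_sum_rolling([[1, 3], [2, 1]]) == 4  # path 1 -> 2 -> 1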
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_lowerCamelCase : Tuple = logging.get_logger(__name__)
# General docstring
_lowerCamelCase : Union[str, Any] = '''ResNetConfig'''
# Base docstring
_lowerCamelCase : int = '''microsoft/resnet-50'''
_lowerCamelCase : Optional[Any] = [1, 2_048, 7, 7]
# Image classification docstring
_lowerCamelCase : int = '''microsoft/resnet-50'''
_lowerCamelCase : Optional[int] = '''tiger cat'''
_lowerCamelCase : str = [
'''microsoft/resnet-50''',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """
    ResNet Embeddings (stem) composed of a single aggressive convolution.
    """

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """
    ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """
    A classic ResNet residual layer composed of two `3x3` convolutions.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """
    A classic ResNet bottleneck layer: the first `1x1` convolution reduces the channels by `reduction`, and the last
    `1x1` convolution restores them.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """
    A ResNet stage composed of stacked layers.
    """

    def __init__(
        self,
        config: ResNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()

        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer

        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
RESNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
"\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase : int ):
'''simple docstring'''
super().__init__(lowercase )
_snake_case = config.num_labels
_snake_case = ResNetModel(lowercase )
# classification head
_snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A ( self : Union[str, Any] , lowercase : Optional[torch.FloatTensor] = None , lowercase : Optional[torch.LongTensor] = None , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None , ):
'''simple docstring'''
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.resnet(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = outputs.pooler_output if return_dict else outputs[1]
_snake_case = self.classifier(lowercase )
_snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_snake_case = 'single_label_classification'
else:
_snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
_snake_case = MSELoss()
if self.num_labels == 1:
_snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_snake_case = loss_fct(lowercase , lowercase )
elif self.config.problem_type == "single_label_classification":
_snake_case = CrossEntropyLoss()
_snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_snake_case = BCEWithLogitsLoss()
_snake_case = loss_fct(lowercase , lowercase )
if not return_dict:
_snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n " ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Tuple , lowercase : Union[str, Any] ):
'''simple docstring'''
super().__init__(lowercase )
super()._init_backbone(lowercase )
_snake_case = [config.embedding_size] + config.hidden_sizes
_snake_case = ResNetEmbeddings(lowercase )
_snake_case = ResNetEncoder(lowercase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@replace_return_docstrings(output_type=lowercase , config_class=_CONFIG_FOR_DOC )
def A ( self : Dict , lowercase : Tensor , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None ):
'''simple docstring'''
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = self.embedder(lowercase )
_snake_case = self.encoder(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = outputs.hidden_states
_snake_case = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_snake_case = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
            feature_maps=lowercase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowercase , )
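# A minimal, download-free smoke test for the classes above (a sketch, not
# part of the original file; num_labels=10 and the 224x224 input size are
# arbitrary choices, and the public class names are taken from transformers):
def _sketch_resnet_classification():
    import torch
    from transformers import ResNetConfig, ResNetForImageClassification

    config = ResNetConfig(num_labels=10)  # randomly initialized, nothing downloaded
    model = ResNetForImageClassification(config).eval()
    pixel_values = torch.randn(1, 3, 224, 224)  # (batch, channels, height, width)
    with torch.no_grad():
        logits = model(pixel_values).logits
    assert logits.shape == (1, 10)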
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Dict = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    _lowerCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Tuple = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    _lowerCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
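# A minimal sketch of the deferred-import trick both __init__ modules above
# rely on (an approximation: the real _LazyModule also implements __dir__,
# __reduce__, and richer error reporting, omitted here). Attribute access
# triggers the submodule import on first use:
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f'.{submodule}', self.__name__)
                return getattr(module, attr)
        raise AttributeError(f'module {self.__name__!r} has no attribute {attr!r}')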
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase : str = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Tuple = XLNetTokenizer
_UpperCAmelCase : Dict = XLNetTokenizerFast
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : int = True
def A ( self : Any ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_snake_case = XLNetTokenizer(lowercase , keep_accents=lowercase )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = '<s>'
_snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<eod>' )
self.assertEqual(len(lowercase ) , 1_006 )
def A ( self : Tuple ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = XLNetTokenizer(lowercase , keep_accents=lowercase )
_snake_case = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [285, 46, 10, 170, 382] )
_snake_case = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_snake_case = tokenizer.convert_tokens_to_ids(lowercase )
self.assertListEqual(lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
_snake_case = tokenizer.convert_ids_to_tokens(lowercase )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = XLNetTokenizer(lowercase , do_lower_case=lowercase )
_snake_case = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + '',
'i',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['▁he', 'll', 'o'] )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = XLNetTokenizer(lowercase , do_lower_case=lowercase )
_snake_case = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
@slow
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = XLNetTokenizer.from_pretrained('xlnet-base-cased' )
_snake_case = tokenizer.encode('sequence builders' , add_special_tokens=lowercase )
_snake_case = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase )
_snake_case = tokenizer.build_inputs_with_special_tokens(lowercase )
_snake_case = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = {'input_ids': [[17, 21_442, 270, 17, 10, 14_645, 318, 34, 17, 4_546, 3_145, 787, 13, 7_752, 22_018, 23, 21, 17, 4_546, 3_145, 787, 13, 3_352, 14_431, 13, 5_500, 11, 1_176, 580, 13, 16_819, 4_797, 23, 17, 10, 17_135, 658, 19, 457, 7_932, 13, 184, 19, 3_154, 17_135, 6_468, 19, 1_404, 12_269, 19, 4_229, 5_356, 16_264, 46, 19, 17, 20_545, 10_395, 9, 9, 9, 11, 28, 6_421, 9_531, 20_729, 17, 10, 353, 17_022, 11, 21, 6_421, 9_531, 16_949, 17, 10, 11_509, 753, 11, 33, 95, 2_421, 7_385, 956, 14_431, 2_626, 25, 842, 7_385, 4_836, 21, 1_429, 2_272, 9_855, 3_120, 161, 24_738, 19, 13_203, 658, 218, 787, 21, 430, 18_482, 847, 2_637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22_178, 27, 1_064, 22, 956, 13, 11_101, 1_429, 5_854, 24_313, 18_953, 40, 422, 24_366, 68, 1_758, 37, 10_483, 14_257, 31, 207, 263, 21, 203, 3_773, 25, 71, 9_735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2_049, 3_442, 17, 13_894, 3_380, 23, 95, 18, 17_634, 2_288, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=lowercase , model_name='xlnet-base-cased' , revision='c841166438c31ec7ca9a106dee7bb312b73ae511' , )
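# The two asserts in the sequence-builders test above pin down XLNet's
# layout: special tokens go at the END of the sequence, with sep id 4 and
# cls id 3. A minimal re-derivation of that build rule (a sketch; the ids
# are taken from the test vectors, not from the tokenizer source):
def _sketch_xlnet_build_inputs(token_ids_a, token_ids_b=None):
    sep, cls = [4], [3]
    if token_ids_b is None:
        return token_ids_a + sep + cls
    return token_ids_a + sep + token_ids_b + sep + cls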
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
def get_deta_config( model_name ):
_snake_case = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
_snake_case = DetaConfig(
backbone_config=__lowercase , num_queries=900 , encoder_ffn_dim=2_048 , decoder_ffn_dim=2_048 , num_feature_levels=5 , assign_first_stage=__lowercase , with_box_refine=__lowercase , two_stage=__lowercase , )
# set labels
_snake_case = 'huggingface/label-files'
if "o365" in model_name:
_snake_case = 366
_snake_case = 'object365-id2label.json'
else:
_snake_case = 91
_snake_case = 'coco-detection-id2label.json'
_snake_case = num_labels
_snake_case = json.load(open(cached_download(hf_hub_url(__lowercase , __lowercase , repo_type='dataset' ) ) , 'r' ) )
_snake_case = {int(__lowercase ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
return config
def create_rename_keys( config ):
_snake_case = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v( state_dict , backbone_config ):
_snake_case = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_snake_case = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_snake_case = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
_snake_case = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_snake_case = in_proj_weight[:dim, :]
_snake_case = in_proj_bias[: dim]
_snake_case = in_proj_weight[
dim : dim * 2, :
]
_snake_case = in_proj_bias[
dim : dim * 2
]
_snake_case = in_proj_weight[
-dim :, :
]
_snake_case = in_proj_bias[-dim :]
# fmt: on
def read_in_decoder_q_k_v( state_dict , config ):
# transformer decoder self-attention layers
_snake_case = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
_snake_case = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
_snake_case = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_snake_case = in_proj_weight[:hidden_size, :]
_snake_case = in_proj_bias[:hidden_size]
_snake_case = in_proj_weight[
hidden_size : hidden_size * 2, :
]
_snake_case = in_proj_bias[hidden_size : hidden_size * 2]
_snake_case = in_proj_weight[-hidden_size:, :]
_snake_case = in_proj_bias[-hidden_size:]
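# A shape sketch of the slicing convention used by the two read_in_* helpers
# above: the original checkpoints fuse query/key/value into one in_proj of
# shape (3*h, h), and rows [0:h], [h:2h], [2h:3h] recover q, k, v.
# (Illustrative helper, not part of the conversion script.)
def _sketch_qkv_split(hidden_size=4):
    in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
    query = in_proj_weight[:hidden_size, :]
    key = in_proj_weight[hidden_size : hidden_size * 2, :]
    value = in_proj_weight[-hidden_size:, :]
    assert query.shape == key.shape == value.shape == (hidden_size, hidden_size)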
def prepare_img():
_snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_snake_case = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def convert_deta_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ):
_snake_case = get_deta_config(__lowercase )
# load original state dict
if model_name == "deta-swin-large":
_snake_case = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
_snake_case = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
_snake_case = torch.load(__lowercase , map_location='cpu' )['model']
# original state dict
for name, param in state_dict.items():
print(__lowercase , param.shape )
# rename keys
_snake_case = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
read_in_swin_q_k_v(__lowercase , config.backbone_config )
read_in_decoder_q_k_v(__lowercase , __lowercase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
if "input_proj" in key:
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
# finally, create HuggingFace model and load state dict
_snake_case = DetaForObjectDetection(__lowercase )
model.load_state_dict(__lowercase )
model.eval()
_snake_case = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(__lowercase )
# load image processor
_snake_case = DetaImageProcessor(format='coco_detection' )
# verify our conversion on image
_snake_case = prepare_img()
_snake_case = processor(images=__lowercase , return_tensors='pt' )
_snake_case = encoding['pixel_values']
_snake_case = model(pixel_values.to(__lowercase ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
_snake_case = torch.tensor(
[[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
_snake_case = torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
elif model_name == "deta-swin-large-o365":
_snake_case = torch.tensor(
[[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
_snake_case = torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__lowercase ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__lowercase ) , atol=1E-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
processor.save_pretrained(__lowercase )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(f'''jozhang97/{model_name}''' )
processor.push_to_hub(f'''jozhang97/{model_name}''' )
if __name__ == "__main__":
_lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_lowerCamelCase : List[Any] = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Dict = DanceDiffusionPipeline
_UpperCAmelCase : List[Any] = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_UpperCAmelCase : List[str] = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
_UpperCAmelCase : Any = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_UpperCAmelCase : str = False
_UpperCAmelCase : str = False
def A ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
        _snake_case = UNet1DModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16_000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=lowercase , use_timestep_embedding=lowercase , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
_snake_case = IPNDMScheduler()
_snake_case = {
'unet': unet,
'scheduler': scheduler,
}
return components
def A ( self : Tuple , lowercase : List[str] , lowercase : Tuple=0 ):
'''simple docstring'''
if str(lowercase ).startswith('mps' ):
_snake_case = torch.manual_seed(lowercase )
else:
_snake_case = torch.Generator(device=lowercase ).manual_seed(lowercase )
_snake_case = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case = self.get_dummy_components()
_snake_case = DanceDiffusionPipeline(**lowercase )
_snake_case = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
_snake_case = self.get_dummy_inputs(lowercase )
_snake_case = pipe(**lowercase )
_snake_case = output.audios
_snake_case = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_snake_case = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def A ( self : List[Any] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def A ( self : int ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def A ( self : Tuple ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def A ( self : int ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def A ( self : Optional[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = torch_device
_snake_case = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
_snake_case = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
_snake_case = torch.manual_seed(0 )
_snake_case = pipe(generator=lowercase , num_inference_steps=100 , audio_length_in_s=4.096 )
_snake_case = output.audios
_snake_case = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_snake_case = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self : Dict ):
'''simple docstring'''
_snake_case = torch_device
        _snake_case = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.float16 )
_snake_case = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
_snake_case = torch.manual_seed(0 )
_snake_case = pipe(generator=lowercase , num_inference_steps=100 , audio_length_in_s=4.096 )
_snake_case = output.audios
_snake_case = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_snake_case = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
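# Note on the magic number above: audio_length_in_s=4.096 is chosen so that,
# at the model's assumed 16 kHz sample rate, the clip is an exact power of
# two in length -- 4.096 s * 16_000 Hz = 65_536 = 2**16 samples per channel,
# which is what pipe.unet.sample_size refers to in the shape asserts.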
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_lowerCamelCase : Dict = '''pt'''
elif is_tf_available():
_lowerCamelCase : List[str] = '''tf'''
else:
_lowerCamelCase : List[Any] = '''jax'''
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = PerceiverTokenizer
_UpperCAmelCase : Optional[int] = False
def A ( self : Tuple ):
'''simple docstring'''
super().setUp()
_snake_case = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A ( self : str ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def A ( self : Optional[int] , **lowercase : Dict ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
def A ( self : Optional[int] , lowercase : Tuple , lowercase : Optional[Any]=False , lowercase : int=20 , lowercase : Optional[int]=5 ):
'''simple docstring'''
_snake_case = []
for i in range(len(lowercase ) ):
try:
_snake_case = tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_snake_case = list(filter(lambda lowercase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , lowercase ) )
_snake_case = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowercase ) , lowercase ) )
if max_length is not None and len(lowercase ) > max_length:
_snake_case = toks[:max_length]
if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0:
while len(lowercase ) < min_length:
_snake_case = toks + toks
# toks_str = [t[1] for t in toks]
_snake_case = [t[0] for t in toks]
# Ensure consistency
_snake_case = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
if " " not in output_txt and len(lowercase ) > 1:
_snake_case = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase )
)
if with_prefix_space:
_snake_case = ' ' + output_txt
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
return output_txt, output_ids
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = 'Unicode €.'
_snake_case = tokenizer(lowercase )
_snake_case = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , lowercase )
# decoding
_snake_case = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '[CLS]Unicode €.[SEP]' )
_snake_case = tokenizer('e è é ê ë' )
_snake_case = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , lowercase )
# decoding
_snake_case = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_snake_case = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
_snake_case = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
self.assertIsInstance(lowercase , lowercase )
if FRAMEWORK != "jax":
_snake_case = list(batch.input_ids.numpy()[0] )
else:
_snake_case = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowercase , lowercase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_snake_case = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , lowercase )
self.assertIn('attention_mask' , lowercase )
self.assertNotIn('decoder_input_ids' , lowercase )
self.assertNotIn('decoder_attention_mask' , lowercase )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = [
'Summary of the text.',
'Another summary.',
]
_snake_case = tokenizer(
text_target=lowercase , max_length=32 , padding='max_length' , truncation=lowercase , return_tensors=lowercase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_snake_case = tokenizer.__class__.from_pretrained(lowercase )
_snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
shutil.rmtree(lowercase )
_snake_case = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_snake_case = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_snake_case = tokenizer.__class__.from_pretrained(lowercase )
_snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_snake_case = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowercase )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase )
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowercase )
_snake_case = [f'''<extra_id_{i}>''' for i in range(125 )]
_snake_case = added_tokens_extra_ids + [
'an_additional_special_token'
]
_snake_case = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case = tokenizer_class.from_pretrained(
lowercase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=lowercase )]
_snake_case = tokenizer_class.from_pretrained(
lowercase , additional_special_tokens=lowercase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def A ( self : Dict ):
'''simple docstring'''
pass
def A ( self : Optional[int] ):
'''simple docstring'''
pass
def A ( self : List[str] ):
'''simple docstring'''
pass
def A ( self : Dict ):
'''simple docstring'''
pass
def A ( self : int ):
'''simple docstring'''
_snake_case = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_snake_case = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
_snake_case = tokenizer.convert_tokens_to_string(lowercase )
                self.assertIsInstance(lowercase , lowercase )
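# The integration vectors above imply Perceiver's byte-level scheme: ids 0-5
# are reserved for special tokens ([CLS]=4, [SEP]=5) and every UTF-8 byte b
# maps to id b + 6. A re-derivation reproducing the first test (a sketch
# inferred from the expected ids, not taken from the tokenizer source):
def _sketch_perceiver_encode(text):
    return [4] + [b + 6 for b in text.encode('utf-8')] + [5]

# _sketch_perceiver_encode('Unicode €.')
# == [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]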
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : str ):
'''simple docstring'''
debug_launcher(test_script.main )
def A ( self : Any ):
'''simple docstring'''
        debug_launcher(test_ops.main )
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
_snake_case , _snake_case = 9, 14 # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjacency = defaultdict(list )
    for nodea, nodeb, cost in edges:
        adjacency[nodea].append([nodeb, cost] )
        adjacency[nodeb].append([nodea, cost] )
    result = mst(adjacency )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
        assert edge in result or reverse in result
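# The imported prisms_algorithm is not shown in this file; below is a minimal
# heap-based Prim over the same adjacency shape built above (a sketch that
# assumes a connected, undirected graph -- the real implementation and its
# exact return format may differ):
def _sketch_prims(adjacency, start=0):
    import heapq

    visited, tree = {start}, []
    heap = [(cost, start, to) for to, cost in adjacency[start]]
    heapq.heapify(heap)
    while heap:
        cost, frm, to = heapq.heappop(heap)
        if to in visited:
            continue
        visited.add(to)
        tree.append((frm, to, cost))
        for nxt, c in adjacency[to]:
            if nxt not in visited:
                heapq.heappush(heap, (c, to, nxt))
    return tree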
def solution() -> int:
    return [
        a * b * (1_000 - a - b)
        for a in range(1 , 999 )
        for b in range(a , 999 )
if (a * a + b * b == (1_000 - a - b) ** 2)
][0]
if __name__ == "__main__":
    print(F'{solution() = }')
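# Worked check of the comprehension above: the unique Pythagorean triple with
# a + b + c = 1_000 is (200, 375, 425), since
# 200**2 + 375**2 = 40_000 + 140_625 = 180_625 = 425**2,
# so the returned product is 200 * 375 * 425 = 31_875_000.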
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Tuple = ["transformers", "torch", "note_seq"]
def __init__( self : List[Any] , *lowercase : List[Any] , **lowercase : Dict ):
'''simple docstring'''
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : List[str] , **lowercase : Any ):
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : List[str] , **lowercase : List[Any] ):
'''simple docstring'''
        requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
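# A simplified sketch of what requires_backends amounts to here (an
# assumption: the real helper uses per-backend availability checks and
# friendlier install hints) -- fail loudly at construction time when any of
# the named backends is missing:
def _sketch_requires_backends(obj, backends):
    import importlib.util

    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = getattr(obj, '__name__', obj.__class__.__name__)
        raise ImportError(f"{name} requires backends that are not installed: {', '.join(missing)}")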
import datasets
from .evaluate import evaluate
_lowerCamelCase : List[str] = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
_lowerCamelCase : List[str] = '''
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
_lowerCamelCase : Dict = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {
'id': datasets.Value('string' ),
'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , )
    def A ( self , predictions , references ):
'''simple docstring'''
        pred_dict = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
        dataset = [
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request('GET' , 'https://huggingface.co' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET' , 'https://huggingface.co' )
def test_offline_with_datasets_offline_mode_enabled():
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
            http_head('https://huggingface.co' )
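# A sketch of how an offline() context manager can force the failure modes
# exercised above (an assumption: the real datasets helper patches
# requests.Session.request; only the connection-failure mode is shown):
import contextlib
from unittest.mock import patch

@contextlib.contextmanager
def _sketch_offline_connection_fails():
    def _raise(*args, **kwargs):
        raise requests.exceptions.ConnectionError('Offline simulation' )
    with patch('requests.Session.request' , _raise ):
        yield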
def nand_gate( input_a : int , input_b : int ) -> int:
    return int((input_a, input_b).count(0 ) != 0 )
def test_nand_gate() -> None:
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
    print(nand_gate(1, 1))
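# Equivalent formulation: NAND is NOT(AND), so int(not (input_a and input_b))
# yields the same truth table as the tuple-count trick used above.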
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCamelCase : Optional[int] = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_lowerCamelCase : List[str] = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_lowerCamelCase : Dict = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Rouge ( datasets.Metric ):
'''simple docstring'''
def _info ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result | 686 | 1 |
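# For reference, the rouge_score package wrapped above can also be called
# directly; a minimal sketch (note that RougeScorer.score takes the reference
# first and the prediction second, matching the zip order in _compute):
from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(['rouge1', 'rougeLsum'], use_stemmer=True)
scores = scorer.score('hello there', 'hello there')  # (reference, prediction)
print(scores['rouge1'].fmeasure)  # 1.0 for an exact match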
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9_500,
        num_object_labels=1_600,
        num_attr_labels=400,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2_048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
        super().__init__(**kwargs) | 686 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'caidas/swin2sr-classicalsr-x2-64': (
        'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler | 686 | 1 |
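# The attribute_map above aliases transformer-style names onto Swin2SR's own
# attributes; a short sketch of the effect, assuming the released
# transformers.Swin2SRConfig with the defaults shown above:
from transformers import Swin2SRConfig

config = Swin2SRConfig()
assert config.hidden_size == config.embed_dim == 180        # "hidden_size" -> "embed_dim"
assert config.num_hidden_layers == config.num_layers == 6   # "num_hidden_layers" -> "num_layers"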
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
_lowerCamelCase : Dict = logging.get_logger(__name__)
SAMPLE_TEXT = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
_snake_case = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 1_024,
'hidden_size': 768,
'max_length': 512,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 1_024,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1E-5,
'token_type_vocab_size': 2,
}
_snake_case = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
_snake_case = BERTEncoder(
attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=__lowercase , output_all_encodings=__lowercase , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , __lowercase ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
_snake_case = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
_snake_case = os.path.join(get_home_dir() , 'models' )
_snake_case = _load_vocab(__lowercase , __lowercase , __lowercase , cls=__lowercase )
_snake_case = nlp.model.BERTModel(
__lowercase , len(__lowercase ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=__lowercase , use_token_type_embed=__lowercase , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=__lowercase , use_decoder=__lowercase , )
    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
_snake_case = original_bort._collect_params_with_prefix()
# Build our config 🤗
_snake_case = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.0_2,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(__lowercase ),
}
_snake_case = BertConfig.from_dict(__lowercase )
_snake_case = BertForMaskedLM(__lowercase )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
_snake_case = hf_param.shape
_snake_case = to_torch(params[gluon_param] )
_snake_case = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
_snake_case = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
_snake_case = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
_snake_case = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
_snake_case = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
_snake_case = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
_snake_case = hf_bort_model.bert.encoder.layer[i]
# self attention
_snake_case = layer.attention.self
_snake_case = check_and_map_params(
self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
_snake_case = check_and_map_params(
self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
_snake_case = check_and_map_params(
self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
_snake_case = check_and_map_params(
self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
_snake_case = check_and_map_params(
self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
_snake_case = check_and_map_params(
self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
_snake_case = layer.attention.output
_snake_case = check_and_map_params(
self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' )
_snake_case = check_and_map_params(
self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' )
_snake_case = check_and_map_params(
self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
_snake_case = check_and_map_params(
self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
_snake_case = layer.intermediate
_snake_case = check_and_map_params(
intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
_snake_case = check_and_map_params(
intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
_snake_case = layer.output
_snake_case = check_and_map_params(
bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
_snake_case = check_and_map_params(
bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
_snake_case = check_and_map_params(
bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
_snake_case = check_and_map_params(
bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)['input_ids']
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()
    hf_inputs = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors='pt')
    output_hf = hf_bort_model(**hf_inputs)[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)
    if success:
        print('✔️ Both models output the same tensors')
    else:
        print('❌ The models do **NOT** output the same tensors')
        print('Absolute difference is:', max_absolute_diff)
if __name__ == "__main__":
_lowerCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCamelCase : Dict = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path) | 686 |
import random
def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = a[pivot], a[left]  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main() | 686 | 1 |
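# A quick non-interactive check of the sort above; seeding makes the random
# pivot choice reproducible (illustrative, not part of the original script):
def test_quick_sort_random() -> None:
    random.seed(0)
    data = [5, 3, 8, 1, 9, 2, 2]
    quick_sort_random(data, 0, len(data))
    assert data == [1, 2, 2, 3, 5, 8, 9]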
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main() -> None:
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/transformers')
    open_issues = repo.get_issues(state='open')
    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main() | 686 |
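# The two branches above encode the stale-bot policy as date arithmetic;
# pulling the thresholds into pure helpers makes them testable without the
# GitHub API (a sketch, with the helper names invented here):
from datetime import datetime


def should_close(now: datetime, updated_at: datetime, created_at: datetime) -> bool:
    # close: 7+ idle days after the bot's stale comment, on issues 30+ days old
    return (now - updated_at).days > 7 and (now - created_at).days >= 30


def should_mark_stale(now: datetime, updated_at: datetime, created_at: datetime) -> bool:
    # warn: 23+ idle days on issues 30+ days old
    return (now - updated_at).days > 23 and (now - created_at).days >= 30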
import math
def is_prime(number: int) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 | 1 |
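# Why `range(j * j + j + 1, (j + 2) * (j + 2), j + 1)` hits the spiral corners:
# the next layer has odd side length j + 2, its bottom-right corner is the
# perfect square (j + 2) ** 2, and the other three corners sit j + 1 apart
# below it. A small check of that claim (illustrative only):
def corner_values(side: int) -> list:
    # all four corners of the spiral layer with the given odd side length
    return [side * side - k * (side - 1) for k in range(4)]


assert sorted(corner_values(5))[:3] == list(range(3 * 3 + 3 + 1, 5 * 5, 3 + 1))  # [13, 17, 21]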
def a_ ( __lowercase : int ) -> "list[int]":
if upper_limit < 0:
raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
_snake_case = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
_snake_case = 1
if upper_limit > 0:
_snake_case = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(__lowercase ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
_lowerCamelCase : Optional[Any] = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F'The Catalan numbers from 0 through {N} are:')
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod() | 686 |
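# The DP above matches the closed form C(n) = binom(2n, n) / (n + 1); a quick
# cross-check with math.comb (illustrative, not part of the original file):
import math

assert catalan_numbers(9) == [math.comb(2 * n, n) // (n + 1) for n in range(10)]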
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1_024, 2_048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def atol_for_validation(self):
        return 1e-3 | 686 | 1 |
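# The stage_names / out_features machinery above is what lets this config
# drive a backbone; a minimal sketch of the alignment, assuming the stock
# transformers.ResNetConfig:
from transformers import ResNetConfig

config = ResNetConfig(out_features=['stage2', 'stage4'])
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_indices)  # the indices aligned to the named stages, i.e. 2 and 4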
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            f'''seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding to this'''
            ' function.')
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f'''val_{metric}''', mode='max', save_top_k=1, every_n_epochs=1)
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f'''val_{metric}''', mode='min' if 'loss' in metric else 'max', patience=patience, verbose=True)
class SCREAMING_SNAKE_CASE__ ( pl.Callback ):
'''simple docstring'''
def A ( self : Tuple , lowercase : int , lowercase : str ):
'''simple docstring'''
_snake_case = {f'''lr_group_{i}''': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lowercase )
@rank_zero_only
def A ( self : Dict , lowercase : pl.Trainer , lowercase : pl.LightningModule , lowercase : str , lowercase : Optional[int]=True ):
'''simple docstring'''
logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
_snake_case = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
_snake_case = Path(pl_module.hparams.output_dir )
if type_path == "test":
_snake_case = od / 'test_results.txt'
_snake_case = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_snake_case = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
_snake_case = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=lowercase )
generations_file.parent.mkdir(exist_ok=lowercase )
with open(lowercase , 'a+' ) as writer:
for key in sorted(lowercase ):
if key in ["log", "progress_bar", "preds"]:
continue
_snake_case = metrics[key]
if isinstance(lowercase , torch.Tensor ):
_snake_case = val.item()
_snake_case = f'''{key}: {val:.6f}\n'''
writer.write(lowercase )
if not save_generations:
return
if "preds" in metrics:
_snake_case = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(lowercase )
@rank_zero_only
def A ( self : int , lowercase : str , lowercase : Tuple ):
'''simple docstring'''
try:
_snake_case = pl_module.model.model.num_parameters()
except AttributeError:
_snake_case = pl_module.model.num_parameters()
_snake_case = count_trainable_parameters(lowercase )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
def A ( self : List[str] , lowercase : pl.Trainer , lowercase : pl.LightningModule ):
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(lowercase , lowercase , 'test' )
@rank_zero_only
def A ( self : Dict , lowercase : pl.Trainer , lowercase : Tuple ):
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid") | 686 |
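# Sanity check for count_trainable_parameters above: an nn.Linear(10, 5) has
# 10 * 5 weights plus 5 biases (illustrative only):
import torch.nn as nn

assert count_trainable_parameters(nn.Linear(10, 5)) == 55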
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder='unet', dtype=dtype, revision=revision)
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4', fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {'params': params}, latents, jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id='stabilityai/stable-diffusion-2', fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1_024), fp16=True)
        sample = model.apply(
            {'params': params}, latents, jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2) | 686 | 1 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : List[Any] , lowercase : Union[str, Any] , lowercase : Tuple=13 , lowercase : Optional[int]=10 , lowercase : Union[str, Any]=3 , lowercase : Any=2 , lowercase : Optional[int]=2 , lowercase : Dict=2 , lowercase : Any=True , lowercase : Optional[Any]=True , lowercase : Optional[int]=32 , lowercase : Dict=5 , lowercase : List[Any]=4 , lowercase : str=37 , lowercase : Union[str, Any]="gelu" , lowercase : Tuple=0.1 , lowercase : Optional[int]=0.1 , lowercase : Tuple=10 , lowercase : Dict=0.02 , lowercase : str=0.9 , lowercase : Optional[Any]=None , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = num_channels
_snake_case = patch_size
_snake_case = tubelet_size
_snake_case = num_frames
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = mask_ratio
_snake_case = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
_snake_case = (image_size // patch_size) ** 2
_snake_case = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
_snake_case = int(mask_ratio * self.seq_length )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = self.get_config()
return config, pixel_values, labels
def A ( self : Any ):
'''simple docstring'''
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , )
def A ( self : str , lowercase : List[Any] , lowercase : Any , lowercase : Optional[Any] ):
'''simple docstring'''
_snake_case = VideoMAEModel(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[Any] , lowercase : int , lowercase : Optional[int] , lowercase : List[Any] ):
'''simple docstring'''
_snake_case = VideoMAEForPreTraining(lowercase )
model.to(lowercase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_snake_case = torch.ones((self.num_masks,) )
_snake_case = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
_snake_case = mask.expand(self.batch_size , -1 ).bool()
_snake_case = model(lowercase , lowercase )
# model only returns predictions for masked patches
_snake_case = mask.sum().item()
_snake_case = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def A ( self : int ):
'''simple docstring'''
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : int = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
_UpperCAmelCase : List[Any] = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : int = False
def A ( self : Dict ):
'''simple docstring'''
_snake_case = VideoMAEModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )
def A ( self : str , lowercase : Optional[Any] , lowercase : int , lowercase : Union[str, Any]=False ):
'''simple docstring'''
_snake_case = copy.deepcopy(lowercase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_snake_case = torch.ones((self.model_tester.num_masks,) )
_snake_case = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
_snake_case = mask.expand(self.model_tester.batch_size , -1 ).bool()
_snake_case = bool_masked_pos.to(lowercase )
if return_labels:
if model_class in [
*get_values(lowercase ),
]:
_snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
def A ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def A ( self : List[str] ):
'''simple docstring'''
pass
def A ( self : List[str] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowercase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def A ( self : str ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase )
@slow
def A ( self : Optional[Any] ):
'''simple docstring'''
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = VideoMAEModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def A ( self : Union[str, Any] ):
'''simple docstring'''
if not self.has_attentions:
pass
else:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = True
for model_class in self.all_model_classes:
_snake_case = self.model_tester.seq_length - self.model_tester.num_masks
_snake_case = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
_snake_case = True
_snake_case = False
_snake_case = True
_snake_case = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowercase , lowercase ) )
_snake_case = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowercase , lowercase ) )
_snake_case = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
_snake_case = len(lowercase )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(out_len + 1 , len(lowercase ) )
_snake_case = outputs.attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def A ( self : Optional[Any] ):
'''simple docstring'''
def check_hidden_states_output(lowercase : str , lowercase : List[str] , lowercase : Dict ):
_snake_case = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowercase , lowercase ) )
_snake_case = outputs.hidden_states
_snake_case = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase ) , lowercase )
_snake_case = self.model_tester.seq_length - self.model_tester.num_masks
_snake_case = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(lowercase , lowercase , lowercase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def A ( self : List[Any] ):
'''simple docstring'''
pass
def prepare_video():
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video', filename='eating_spaghetti.npy', repo_type='dataset')
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A ( self : Tuple ):
'''simple docstring'''
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def A ( self : Dict ):
'''simple docstring'''
_snake_case = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
lowercase )
_snake_case = self.default_image_processor
_snake_case = prepare_video()
_snake_case = image_processor(lowercase , return_tensors='pt' ).to(lowercase )
# forward pass
with torch.no_grad():
_snake_case = model(**lowercase )
# verify the logits
_snake_case = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowercase )
_snake_case = torch.tensor([0.3669, -0.0688, -0.2421] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )
@slow
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(lowercase )
_snake_case = self.default_image_processor
_snake_case = prepare_video()
_snake_case = image_processor(lowercase , return_tensors='pt' ).to(lowercase )
# add boolean mask, indicating which patches to mask
_snake_case = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
_snake_case = torch.load(lowercase )
# forward pass
with torch.no_grad():
_snake_case = model(**lowercase )
# verify the logits
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=lowercase )
self.assertEqual(outputs.logits.shape , lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowercase , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
_snake_case = torch.tensor([0.5142] , device=lowercase )
self.assertTrue(torch.allclose(outputs.loss , lowercase , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
_snake_case = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=lowercase ).to(
lowercase )
with torch.no_grad():
_snake_case = model(**lowercase )
_snake_case = torch.tensor(torch.tensor([0.6469] ) , device=lowercase )
self.assertTrue(torch.allclose(outputs.loss , lowercase , atol=1E-4 ) ) | 686 |
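# The tests above build `bool_masked_pos` by hand; a standalone sketch of the
# same construction for one video with 1568 tokens and a 0.9 mask ratio
# (1568 = 16 frames / tubelet 2 * (224 / 16) ** 2 patches per frame):
import torch

seq_length = 1_568
num_masks = int(0.9 * seq_length)
mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
bool_masked_pos = mask.expand(1, -1).bool()  # shape (batch_size, seq_length); True = masked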
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace('moe_layer.experts.0', f'''ffn.experts.expert_{expert_idx}''')
            else:
                key = key.replace('moe_layer.experts.', 'ffn.experts.expert_')
        if "gate" in key:
            key = key.replace('.moe_layer.gate.wg', '.ffn.router.classifier')
        if "fc2" in key and "experts" not in key:
            key = key.replace('.fc2.', '.ffn.fc2.')
        if "fc1" in key and "experts" not in key:
            key = key.replace('.fc1.', '.ffn.fc1.')
        if ".encoder_attn." in key:
            key = key.replace('.encoder_attn.', '.cross_attention.')
        if "encoder_attn_layer_norm" in key:
            key = key.replace('encoder_attn_layer_norm', 'cross_attention_layer_norm')
        if "final_layer_norm" in key:
            key = key.replace('final_layer_norm', 'ff_layer_norm')
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f'''-rank-{expert}.pt'''
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)['model']
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace('.bin', f'''-{len(sharded_state_dicts)+1:05d}-of-???.bin'''))
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype)
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace('.bin', f'''-{len(sharded_state_dicts)+1:05d}-of-???.bin'''))
    shared_weights = torch.load(switch_checkpoint_path + '-shared.pt')['model']
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights['shared.weight'] = shared_weights['decoder.embed_tokens.weight']
    sharded_state_dicts.append(shared_weights.keys())
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace('.bin', f'''-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin''')
        temp_filename = os.path.join(dump_path, weights_name.replace('.bin', f'''-{idx+1:05d}-of-???.bin'''))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'total_size': total_size}
    index = {'metadata': metadata, 'weight_map': weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), 'w', encoding='utf-8') as f:
        content = json.dumps(index, indent=2, sort_keys=True) + '\n'
        f.write(content)
    return metadata, index
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
_lowerCamelCase : List[str] = parser.parse_args()
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
_lowerCamelCase : Tuple = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
_lowerCamelCase : Dict = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path) | 686 | 1 |
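# `dtype_byte_size` is what turns parameter counts into the index's
# `total_size` field above; a quick illustration of its output:
import torch
from transformers.modeling_utils import dtype_byte_size

assert dtype_byte_size(torch.float32) == 4
assert dtype_byte_size(torch.float16) == 2
assert dtype_byte_size(torch.bool) == 1 / 8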
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention
        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
                f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''')
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention)
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention)  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None
        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)
        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype)
        else:
            norm_hidden_states = self.norm1(hidden_states)
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs)
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs)
            hidden_states = attn_output + hidden_states
        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''')
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)], dim=self._chunk_dim)
        else:
            ff_output = self.ff(norm_hidden_states)
        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output
        hidden_states = ff_output + hidden_states
        return hidden_states
class FeedForward(nn.Module):
    r"""A feed-forward layer: projection in, dropout, projection out."""

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
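

# Illustrative usage (not part of the original module): with the default geglu
# activation, FeedForward expands dim -> mult*dim inside GEGLU's doubled projection,
# then projects back down to dim. Sizes here are invented for the demo.
def _demo_feed_forward():
    import torch

    ff = FeedForward(dim=64, mult=4, activation_fn="geglu")
    out = ff(torch.randn(2, 10, 64))
    assert out.shape == (2, 10, 64)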
class GELU(nn.Module):
    r"""GELU activation function, with optional tanh approximation."""

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    r"""A variant of the gated linear unit activation function, https://arxiv.org/abs/2002.05202."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    r"""Approximate GELU: x * sigmoid(1.702 * x), https://arxiv.org/abs/1606.08415."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
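

# Illustrative check (not part of the original module): x * sigmoid(1.702 * x) is the
# classic sigmoid approximation of GELU (Hendrycks & Gimpel); on [-3, 3] it stays within
# a few hundredths of the exact value. The tolerance below is deliberately loose.
def _demo_gelu_sigmoid_approximation():
    import torch
    import torch.nn.functional as F

    x = torch.linspace(-3.0, 3.0, steps=101)
    approx = x * torch.sigmoid(1.702 * x)
    exact = F.gelu(x)
    assert (approx - exact).abs().max() < 0.03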
class AdaLayerNorm(nn.Module):
    r"""Norm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
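

# Illustrative usage (not part of the original module): AdaLayerNorm swaps LayerNorm's
# fixed affine parameters for a (scale, shift) pair predicted from a timestep embedding,
# so the normalization is conditioned on the diffusion step. Sizes are invented; the
# timestep is a scalar tensor so scale/shift broadcast over (batch, tokens, channels).
def _demo_ada_layer_norm():
    import torch

    norm = AdaLayerNorm(embedding_dim=32, num_embeddings=1000)
    x = torch.randn(2, 8, 32)
    out = norm(x, torch.tensor(500))
    assert out.shape == x.shape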
class AdaLayerNormZero(nn.Module):
    r"""Norm layer with adaptive layer norm zero (adaLN-Zero) conditioning."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    r"""GroupNorm layer modified to incorporate timestep embeddings."""

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x | 686 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
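

# Illustrative usage (not part of the original module) of the helpers above on toy arrays.
def _demo_glue_helpers():
    import numpy as np

    preds = np.array([0, 1, 1, 0])
    labels = np.array([0, 1, 0, 0])
    assert simple_accuracy(preds, labels) == 0.75
    scores = acc_and_f1(preds, labels)  # {'accuracy': 0.75, 'f1': 0.666...}
    corrs = pearson_and_spearman(preds.astype(float), labels.astype(float))
    assert set(corrs) == {"pearson", "spearmanr"}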
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' ) | 686 | 1 |
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node

        # Return prev in order to put the head at the end
        self.head = prev
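

# Illustrative trace (not part of the original module): reverse() rewires each `next`
# pointer in a single pass with O(1) extra space; the values here are invented for the demo.
def _demo_reverse():
    ll = LinkedList()
    for value in (1, 2, 3):
        ll.insert_tail(value)
    assert str(ll) == "1->2->3"
    ll.reverse()
    assert str(ll) == "3->2->1"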
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    # This section tests that the basic node methods work properly
    test_input = [
        -9,
        100,
        Node(77_345_112),
        'dlrow olleH',
        7,
        5_555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main() | 686 | 1 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."

        # We test on the dev set to compare to benchmarks without having to submit to the GLUE server
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )
    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--task", default="", type=str, required=True, help="The GLUE task to run")
        parser.add_argument(
            "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none"
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main() | 686 | 1 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
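

# Example invocation (illustrative only; the script filename and all paths below are
# placeholders, not taken from the original file):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin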
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path) | 686 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
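
    # Illustrative note (not part of the original test): the formula above rounds
    # seq_length up to the nearest multiple of attention_window, e.g. with
    # seq_length=7 and attention_window=6: 7 + (6 - 7 % 6) % 6 == 12.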
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
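

# Illustrative usage (not part of the original test file): masks are derived from the
# pad token id when not given. The config stub and ids below are invented for the demo.
def _demo_prepare_led_inputs_dict():
    from types import SimpleNamespace

    config = SimpleNamespace(
        pad_token_id=1, encoder_layers=2, encoder_attention_heads=4, decoder_layers=2, decoder_attention_heads=4
    )
    input_ids = tf.constant([[5, 6, 1]])  # last position is padding
    decoder_input_ids = tf.constant([[5, 6]])
    inputs = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
    assert inputs["attention_mask"].numpy().tolist() == [[1, 1, 0]]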
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implemented
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3) | 686 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
def A ( self : Optional[int] ):
'''simple docstring'''
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = LayoutLMvaImageProcessor()
from datasets import load_dataset
_snake_case = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
_snake_case = Image.open(ds[0]['file'] ).convert('RGB' )
_snake_case = image_processing(lowercase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_snake_case = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
_snake_case = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 
788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowercase )
self.assertListEqual(encoding.boxes , lowercase )
# with apply_OCR = False
_snake_case = LayoutLMvaImageProcessor(apply_ocr=lowercase )
_snake_case = image_processing(lowercase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) | 686 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def parameterized_custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
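

# Illustrative note (not part of the original test): with stages = ["zero2", "zero3"] and
# models = {"base": ..., "robust": ...}, the name_func above produces sub-test names such
# as `test_fp32_non_distributed_zero2_base` instead of parameterized's default numeric
# suffixes, so both the stage and the model show up in the test report.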
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage,
            model=model,
            distributed=False,
            fp16=False,
        )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage,
            model=model,
            distributed=True,
            fp16=False,
        )

    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage,
            model=model,
            distributed=False,
            fp16=True,
        )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(
            stage=stage,
            model=model,
            distributed=True,
            fp16=True,
        )

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass
    def run_and_check(self, stage: str, model: str, eval_steps: int = 10, distributed: bool = True, fp16: bool = True, quality_checks: bool = True):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir
    def run_trainer(
        self, stage: str, model_name: str, eval_steps: int = 10, num_train_epochs: int = 1, distributed: bool = True, fp16: bool = True
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir
    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split() | 686 | 1 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
@require_torch
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(text_target=lowercase , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def A ( self : Any ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(
['I am a small frog' * 1_024, 'I am a small frog'] , padding=lowercase , truncation=lowercase , return_tensors='pt' )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(batch.input_ids.shape , (2, 5_122) )
@require_torch
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = ['A long paragraph for summarization.']
_snake_case = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(lowercase , return_tensors='pt' )
_snake_case = tokenizer(text_target=lowercase , return_tensors='pt' )
_snake_case = inputs['input_ids']
_snake_case = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def A ( self : Dict ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = ['Summary of the text.', 'Another summary.']
_snake_case = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_snake_case = tokenizer(lowercase , padding=lowercase )
_snake_case = [[0] * len(lowercase ) for x in encoded_output['input_ids']]
_snake_case = tokenizer.pad(lowercase )
self.assertSequenceEqual(outputs['global_attention_mask'] , lowercase )
def A ( self : Union[str, Any] ):
'''simple docstring'''
pass
def A ( self : Any ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
_snake_case = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
_snake_case = 'A, <mask> AllenNLP sentence.'
_snake_case = tokenizer_r.encode_plus(lowercase , add_special_tokens=lowercase , return_token_type_ids=lowercase )
_snake_case = tokenizer_p.encode_plus(lowercase , add_special_tokens=lowercase , return_token_type_ids=lowercase )
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
_snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
lowercase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
lowercase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
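# Usage sketch of the behaviour exercised in the tests above: the LED tokenizer
# pads a caller-supplied `global_attention_mask` alongside `input_ids`. The
# checkpoint name is the one used by the tests; treat this as a sketch.
#
#   from transformers import LEDTokenizer
#   tok = LEDTokenizer.from_pretrained('allenai/led-base-16384')
#   enc = tok(['Summary of the text.', 'Another summary.'])
#   enc['global_attention_mask'] = [[0] * len(ids) for ids in enc['input_ids']]
#   padded = tok.pad(enc, padding='longest')  # the mask is padded to the batch length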
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_lowerCamelCase : int = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
_lowerCamelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
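# The pattern above defers the torch-backed imports until an attribute is first
# touched: upstream, the module object is replaced in sys.modules by the
# _LazyModule built from the _import_structure dict. Rough effect, as a sketch:
#
#   import transformers.models.ernie as ernie
#   cfg = ernie.ErnieConfig    # resolved lazily from configuration_ernie
#   model = ernie.ErnieModel   # first touch imports the torch submodule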
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    # move the value stored under the original key to the converted key name
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('backbone.0.body', 'backbone.conv_encoder.model')
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ''
    if is_panoptic:
        prefix = 'conditional_detr.'
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict;
        # the target key names follow the upstream DETR-family conversion scripts
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def a_ ( __lowercase : int , __lowercase : List[str] ) -> Any:
_snake_case = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
_snake_case = 'resnet101'
if "dc5" in model_name:
_snake_case = True
_snake_case = 'panoptic' in model_name
if is_panoptic:
_snake_case = 250
else:
_snake_case = 91
_snake_case = 'huggingface/label-files'
_snake_case = 'coco-detection-id2label.json'
_snake_case = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='dataset' ) , 'r' ) )
_snake_case = {int(__lowercase ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
# load image processor
_snake_case = 'coco_panoptic' if is_panoptic else 'coco_detection'
_snake_case = ConditionalDetrImageProcessor(format=__lowercase )
# prepare image
_snake_case = prepare_img()
_snake_case = image_processor(images=__lowercase , return_tensors='pt' )
_snake_case = encoding['pixel_values']
logger.info(f'''Converting model {model_name}...''' )
# load original model from torch hub
_snake_case = torch.hub.load('DeppMeng/ConditionalDETR' , __lowercase , pretrained=__lowercase ).eval()
_snake_case = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
_snake_case = 'conditional_detr.' + src
rename_key(__lowercase , __lowercase , __lowercase )
_snake_case = rename_backbone_keys(__lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(__lowercase , is_panoptic=__lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_snake_case = 'conditional_detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('conditional_detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
# finally, create HuggingFace model and load state dict
_snake_case = ConditionalDetrForSegmentation(__lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(__lowercase )
model.load_state_dict(__lowercase )
model.eval()
model.push_to_hub(repo_id=__lowercase , organization='DepuMeng' , commit_message='Add model' )
# verify our conversion
_snake_case = conditional_detr(__lowercase )
_snake_case = model(__lowercase )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1E-4 )
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
image_processor.save_pretrained(__lowercase )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
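# Hypothetical invocation of the conversion script above (script filename is
# illustrative; the model name must match a checkpoint published on the
# DeppMeng/ConditionalDETR torch hub repo):
#
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50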
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n: int, prec: int = 1_000) -> bool:
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d exact
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input('''Enter bound : ''').strip()))
    print('''Here\'s the list of primes:''')
    print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
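# Quick sanity checks for the probabilistic test above (assuming bin_exp_mod is
# the usual binary modular exponentiation helper imported at the top):
#
#   >>> is_prime_big(97)
#   True
#   >>> is_prime_big(561)  # Carmichael number; Miller-Rabin still rejects it
#   False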
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    # convert configuration
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=['RobertaPreLayerNormForMaskedLM'] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename='pytorch_model.bin'))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('roberta.'):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.') :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight') or tensor_key.endswith('.self.LayerNorm.bias'):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path)
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
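# Hypothetical invocation, using the checkpoint repo named in the
# --checkpoint-repo help string above:
#
#   python convert_roberta_prelayernorm_checkpoint_to_pytorch.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta_prelayernorm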
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
_lowerCamelCase : int = re.compile(r'''\s+''')
def a_ ( __lowercase : List[Any] ) -> int:
return {"hash": hashlib.mda(re.sub(__lowercase , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def a_ ( __lowercase : List[Any] ) -> Dict:
_snake_case = [len(__lowercase ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(__lowercase ), "line_max": max(__lowercase )}
def a_ ( __lowercase : Optional[int] ) -> List[str]:
_snake_case = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def a_ ( __lowercase : List[Any] , __lowercase : Optional[Any] ) -> Optional[int]:
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def a_ ( __lowercase : Union[str, Any] , __lowercase : int=5 ) -> Optional[Any]:
_snake_case = ['auto-generated', 'autogenerated', 'automatically generated']
_snake_case = example['content'].splitlines()
for _, line in zip(range(__lowercase ) , __lowercase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def a_ ( __lowercase : List[Any] , __lowercase : int=5 , __lowercase : Tuple=0.0_5 ) -> Union[str, Any]:
_snake_case = ['unit tests', 'test file', 'configuration file']
_snake_case = example['content'].splitlines()
_snake_case = 0
_snake_case = 0
# first test
for _, line in zip(range(__lowercase ) , __lowercase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_snake_case = example['content'].count('\n' )
_snake_case = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def a_ ( __lowercase : Union[str, Any] ) -> Any:
_snake_case = ['def ', 'class ', 'for ', 'while ']
_snake_case = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def a_ ( __lowercase : Tuple , __lowercase : Any=4 ) -> List[str]:
_snake_case = example['content'].splitlines()
_snake_case = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def a_ ( __lowercase : Dict ) -> Dict:
_snake_case = tokenizer(example['content'] , truncation=__lowercase )['input_ids']
_snake_case = len(example['content'] ) / len(__lowercase )
return {"ratio": ratio}
def a_ ( __lowercase : Optional[Any] ) -> Any:
_snake_case = {}
results.update(get_hash(__lowercase ) )
results.update(line_stats(__lowercase ) )
results.update(alpha_stats(__lowercase ) )
results.update(char_token_ratio(__lowercase ) )
results.update(is_autogenerated(__lowercase ) )
results.update(is_config_or_test(__lowercase ) )
results.update(has_no_keywords(__lowercase ) )
results.update(has_few_assignments(__lowercase ) )
return results
def a_ ( __lowercase : Optional[int] , __lowercase : str , __lowercase : List[Any] ) -> int:
if not check_uniques(__lowercase , __lowercase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def a_ ( __lowercase : Dict ) -> Dict:
with open(__lowercase , 'rb' ) as f_in:
with gzip.open(str(__lowercase ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(__lowercase , __lowercase )
os.unlink(__lowercase )
# Settings
_lowerCamelCase : Dict = HfArgumentParser(PreprocessingArguments)
_lowerCamelCase : Dict = parser.parse_args()
if args.num_workers is None:
_lowerCamelCase : int = multiprocessing.cpu_count()
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
_lowerCamelCase : Any = time.time()
_lowerCamelCase : Optional[Any] = load_dataset(args.dataset_name, split='''train''')
print(F'Time to load dataset: {time.time()-t_start:.2f}')
# Run preprocessing
_lowerCamelCase : Optional[int] = time.time()
_lowerCamelCase : Union[str, Any] = ds.map(preprocess, num_proc=args.num_workers)
print(F'Time to preprocess dataset: {time.time()-t_start:.2f}')
# Deduplicate hashes
_lowerCamelCase : List[Any] = set(ds.unique('''hash'''))
_lowerCamelCase : Dict = len(uniques) / len(ds)
print(F'Fraction of duplicates: {1-frac:.2%}')
# Deduplicate data and apply heuristics
_lowerCamelCase : List[Any] = time.time()
_lowerCamelCase : Optional[int] = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F'Time to filter dataset: {time.time()-t_start:.2f}')
print(F'Size of filtered dataset: {len(ds_filter)}')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
_lowerCamelCase : Union[str, Any] = time.time()
_lowerCamelCase , _lowerCamelCase : Dict = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'Time to deduplicate dataset: {time.time()-t_start:.2f}')
print(F'Size of deduplicate dataset: {len(ds_filter)}')
# Save data in batches of samples_per_file
_lowerCamelCase : Optional[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
_lowerCamelCase : int = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
_lowerCamelCase : Union[str, Any] = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
_lowerCamelCase : Dict = str(data_dir / F'file-{file_number+1:012}.json')
_lowerCamelCase : str = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'Time to save dataset: {time.time()-t_start:.2f}')
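# The exact-deduplication trick above, isolated as a sketch: hash the
# whitespace-stripped content, collect the set of unique hashes, then accept
# each hash only on its first occurrence (names below are illustrative).
#
#   import hashlib, re
#   PATTERN = re.compile(r'\s+')
#   def content_hash(text):
#       return hashlib.md5(re.sub(PATTERN, '', text).encode('utf-8')).hexdigest()
#   corpus = ['a b', 'ab', 'c']                      # 'a b' and 'ab' collide
#   uniques = {content_hash(t) for t in corpus}      # analogue of ds.unique('hash')
#   def keep(text):                                  # analogue of check_uniques above
#       h = content_hash(text)
#       if h in uniques:
#           uniques.remove(h)                        # later duplicates are dropped
#           return True
#       return False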
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : int = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = "yolos"
def __init__( self : int , lowercase : List[str]=768 , lowercase : Tuple=12 , lowercase : int=12 , lowercase : int=3_072 , lowercase : Optional[int]="gelu" , lowercase : str=0.0 , lowercase : Optional[int]=0.0 , lowercase : Optional[Any]=0.02 , lowercase : List[str]=1E-12 , lowercase : Dict=[512, 864] , lowercase : Union[str, Any]=16 , lowercase : List[Any]=3 , lowercase : List[str]=True , lowercase : Optional[int]=100 , lowercase : int=True , lowercase : Dict=False , lowercase : str=1 , lowercase : int=5 , lowercase : Tuple=2 , lowercase : List[str]=5 , lowercase : Any=2 , lowercase : List[str]=0.1 , **lowercase : int , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = qkv_bias
_snake_case = num_detection_tokens
_snake_case = use_mid_position_embeddings
_snake_case = auxiliary_loss
# Hungarian matcher
_snake_case = class_cost
_snake_case = bbox_cost
_snake_case = giou_cost
# Loss coefficients
_snake_case = bbox_loss_coefficient
_snake_case = giou_loss_coefficient
_snake_case = eos_coefficient
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = version.parse("1.11" )
@property
def A ( self : str ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def A ( self : Any ):
'''simple docstring'''
return 1E-4
@property
def A ( self : List[Any] ):
'''simple docstring'''
return 12
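# Usage sketch for the configuration and its ONNX export metadata above (public
# Hugging Face class names assumed for the obfuscated identifiers):
#
#   from transformers.models.yolos.configuration_yolos import YolosConfig, YolosOnnxConfig
#   config = YolosConfig()                # image_size defaults to [512, 864]
#   onnx_config = YolosOnnxConfig(config)
#   onnx_config.inputs                    # pixel_values -> batch/num_channels/height/width
#   onnx_config.atol_for_validation       # 1e-4, as returned above
#   onnx_config.default_onnx_opset        # 12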
def move_tower(height, from_pole, to_pole, with_pole):
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)
def move_disk(fp, tp):
    print('moving disk from', fp, 'to', tp)
def main():
    height = int(input('Height of hanoi: ').strip())
    move_tower(height, 'A', 'B', 'C')
if __name__ == "__main__":
main()
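# Expected behaviour of move_tower for a height-2 tower (2**2 - 1 = 3 moves):
#
#   >>> move_tower(2, 'A', 'B', 'C')
#   moving disk from A to C
#   moving disk from A to B
#   moving disk from C to B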
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_lowerCamelCase : Tuple = logging.get_logger(__name__)
# General docstring
_lowerCamelCase : Union[str, Any] = '''ResNetConfig'''
# Base docstring
_lowerCamelCase : int = '''microsoft/resnet-50'''
_lowerCamelCase : Optional[Any] = [1, 2_048, 7, 7]
# Image classification docstring
_lowerCamelCase : int = '''microsoft/resnet-50'''
_lowerCamelCase : Optional[int] = '''tiger cat'''
_lowerCamelCase : str = [
'''microsoft/resnet-50''',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int , lowercase : int , lowercase : int = 3 , lowercase : int = 1 , lowercase : str = "relu" ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Convad(
lowercase , lowercase , kernel_size=lowercase , stride=lowercase , padding=kernel_size // 2 , bias=lowercase )
_snake_case = nn.BatchNormad(lowercase )
_snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def A ( self : Union[str, Any] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = self.convolution(lowercase )
_snake_case = self.normalization(lowercase )
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : ResNetConfig ):
'''simple docstring'''
super().__init__()
_snake_case = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_snake_case = config.num_channels
def A ( self : Tuple , lowercase : Tensor ):
'''simple docstring'''
_snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
_snake_case = self.embedder(lowercase )
_snake_case = self.pooler(lowercase )
return embedding
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase : int , lowercase : int , lowercase : int = 2 ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Convad(lowercase , lowercase , kernel_size=1 , stride=lowercase , bias=lowercase )
_snake_case = nn.BatchNormad(lowercase )
def A ( self : List[str] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = self.convolution(lowercase )
_snake_case = self.normalization(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : int , lowercase : int , lowercase : int = 1 , lowercase : str = "relu" ):
'''simple docstring'''
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = (
ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , activation=lowercase ) , )
_snake_case = ACTaFN[activation]
def A ( self : List[str] , lowercase : List[str] ):
'''simple docstring'''
_snake_case = hidden_state
_snake_case = self.layer(lowercase )
_snake_case = self.shortcut(lowercase )
hidden_state += residual
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int , lowercase : int , lowercase : int = 1 , lowercase : str = "relu" , lowercase : int = 4 ):
'''simple docstring'''
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = out_channels // reduction
_snake_case = (
ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowercase , lowercase , kernel_size=1 ) , ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , kernel_size=1 , activation=lowercase ) , )
_snake_case = ACTaFN[activation]
def A ( self : Dict , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = hidden_state
_snake_case = self.layer(lowercase )
_snake_case = self.shortcut(lowercase )
hidden_state += residual
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowercase : ResNetConfig , lowercase : int , lowercase : int , lowercase : int = 2 , lowercase : int = 2 , ):
'''simple docstring'''
super().__init__()
_snake_case = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
_snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , stride=lowercase , activation=config.hidden_act ) , *[layer(lowercase , lowercase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def A ( self : List[str] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = input
for layer in self.layers:
_snake_case = layer(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : ResNetConfig ):
'''simple docstring'''
super().__init__()
_snake_case = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase , config.depths[1:] ):
self.stages.append(ResNetStage(lowercase , lowercase , lowercase , depth=lowercase ) )
def A ( self : str , lowercase : Tensor , lowercase : bool = False , lowercase : bool = True ):
'''simple docstring'''
_snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
_snake_case = stage_module(lowercase )
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowercase , hidden_states=lowercase , )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = ResNetConfig
_UpperCAmelCase : Tuple = "resnet"
_UpperCAmelCase : Optional[Any] = "pixel_values"
_UpperCAmelCase : Dict = True
def A ( self : List[str] , lowercase : Dict ):
'''simple docstring'''
if isinstance(lowercase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowercase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def A ( self : Tuple , lowercase : List[Any] , lowercase : Optional[Any]=False ):
'''simple docstring'''
if isinstance(lowercase , lowercase ):
_snake_case = value
_lowerCamelCase : str = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
_lowerCamelCase : int = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare ResNet model outputting raw features without any specific head on top." ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : Any ):
'''simple docstring'''
super().__init__(lowercase )
_snake_case = config
_snake_case = ResNetEmbeddings(lowercase )
_snake_case = ResNetEncoder(lowercase )
_snake_case = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A ( self : Union[str, Any] , lowercase : Tensor , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None ):
'''simple docstring'''
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.embedder(lowercase )
_snake_case = self.encoder(
lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = encoder_outputs[0]
_snake_case = self.pooler(lowercase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase : int ):
'''simple docstring'''
super().__init__(lowercase )
_snake_case = config.num_labels
_snake_case = ResNetModel(lowercase )
# classification head
_snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A ( self : Union[str, Any] , lowercase : Optional[torch.FloatTensor] = None , lowercase : Optional[torch.LongTensor] = None , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None , ):
'''simple docstring'''
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.resnet(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = outputs.pooler_output if return_dict else outputs[1]
_snake_case = self.classifier(lowercase )
_snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_snake_case = 'single_label_classification'
else:
_snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
_snake_case = MSELoss()
if self.num_labels == 1:
_snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_snake_case = loss_fct(lowercase , lowercase )
elif self.config.problem_type == "single_label_classification":
_snake_case = CrossEntropyLoss()
_snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_snake_case = BCEWithLogitsLoss()
_snake_case = loss_fct(lowercase , lowercase )
if not return_dict:
_snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n " ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Tuple , lowercase : Union[str, Any] ):
'''simple docstring'''
super().__init__(lowercase )
super()._init_backbone(lowercase )
_snake_case = [config.embedding_size] + config.hidden_sizes
_snake_case = ResNetEmbeddings(lowercase )
_snake_case = ResNetEncoder(lowercase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@replace_return_docstrings(output_type=lowercase , config_class=_CONFIG_FOR_DOC )
def A ( self : Dict , lowercase : Tensor , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None ):
'''simple docstring'''
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = self.embedder(lowercase )
_snake_case = self.encoder(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = outputs.hidden_states
_snake_case = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_snake_case = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowercase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowercase , )
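# A short, self-contained inference sketch against the classes defined above,
# using the checkpoint named in the docstrings and a COCO test image (standard
# Hugging Face usage; hedged, not part of this module):
#
#   import requests, torch
#   from PIL import Image
#   from transformers import AutoImageProcessor, ResNetForImageClassification
#   url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
#   image = Image.open(requests.get(url, stream=True).raw)
#   processor = AutoImageProcessor.from_pretrained('microsoft/resnet-50')
#   model = ResNetForImageClassification.from_pretrained('microsoft/resnet-50')
#   inputs = processor(images=image, return_tensors='pt')
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])  # e.g. 'tiger cat'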
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Dict = False
if __name__ == "__main__":
_lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--repo_path''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
_lowerCamelCase : Dict = parser.parse_args()
_lowerCamelCase : Optional[int] = {
'''image_size''': '''sample_size''',
'''num_res_blocks''': '''layers_per_block''',
'''block_channels''': '''block_out_channels''',
'''down_blocks''': '''down_block_types''',
'''up_blocks''': '''up_block_types''',
'''downscale_freq_shift''': '''freq_shift''',
'''resnet_num_groups''': '''norm_num_groups''',
'''resnet_act_fn''': '''act_fn''',
'''resnet_eps''': '''norm_eps''',
'''num_head_channels''': '''attention_head_dim''',
}
_lowerCamelCase : str = {
'''time_steps''': '''time_proj''',
'''mid''': '''mid_block''',
'''downsample_blocks''': '''down_blocks''',
'''upsample_blocks''': '''up_blocks''',
}
_lowerCamelCase : List[str] = '''''' if has_file(args.repo_path, '''config.json''') else '''unet'''
with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
_lowerCamelCase : List[str] = reader.read()
_lowerCamelCase : int = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, '''config.json'''):
_lowerCamelCase : str = UNetaDModel(**config)
else:
_lowerCamelCase : Tuple = UNetaDConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNetaDModel
_lowerCamelCase : Dict = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
_lowerCamelCase : Any = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
_lowerCamelCase : Optional[Any] = config[key]
del config[key]
_lowerCamelCase : Tuple = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']]
_lowerCamelCase : int = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']]
if do_only_weights:
_lowerCamelCase : int = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
_lowerCamelCase : Optional[Any] = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
continue
_lowerCamelCase : Optional[Any] = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('''.''')[0] == key:
_lowerCamelCase : Tuple = param_value
_lowerCamelCase : List[str] = True
if not has_changed:
_lowerCamelCase : str = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
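# The three module-level booleans at the top of this script appear to select
# the mode: rewrite only the config keys, only rename them, or remap the
# state-dict weights (their original names were lost in this dump, so this
# reading is an assumption). Hypothetical invocation:
#
#   python convert_unet_checkpoint.py --repo_path ./old-unet-repo --dump_path ./converted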
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Tuple = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    result = ''
    try:
        with open(file_path, 'rb') as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f'{dat:08b}'
            result += curr_byte
        return result
    except OSError:
        print('File not accessible')
        sys.exit()
def add_key_to_lexicon(lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str) -> None:
    # replace the matched prefix by its two one-bit extensions
    lexicon.pop(curr_string)
    lexicon[curr_string + '0'] = last_match_id
    if math.log2(index).is_integer():
        # codes need one more bit; left-pad every existing code with '0'
        for curr_key in lexicon:
            lexicon[curr_key] = '0' + lexicon[curr_key]
    lexicon[curr_string + '1'] = bin(index)[2:]
def compress_data(data_bits: str) -> str:
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ''
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length(source_path: str, compressed: str) -> str:
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path, 'wb') as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('10000000')
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder='big'))
    except OSError:
        print('File not accessible')
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
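# A quick in-memory check of the compressor above on a short bit-string; only
# compress_data is exercised, so no files are touched:
#
#   sample = '1001011100'
#   compressed = compress_data(sample)
#   print(len(sample), '->', len(compressed), 'bits')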
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
def a_ ( __lowercase : Union[str, Any] ) -> List[Any]:
_snake_case = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
_snake_case = DetaConfig(
backbone_config=__lowercase , num_queries=900 , encoder_ffn_dim=2_048 , decoder_ffn_dim=2_048 , num_feature_levels=5 , assign_first_stage=__lowercase , with_box_refine=__lowercase , two_stage=__lowercase , )
# set labels
_snake_case = 'huggingface/label-files'
if "o365" in model_name:
_snake_case = 366
_snake_case = 'object365-id2label.json'
else:
_snake_case = 91
_snake_case = 'coco-detection-id2label.json'
_snake_case = num_labels
_snake_case = json.load(open(cached_download(hf_hub_url(__lowercase , __lowercase , repo_type='dataset' ) ) , 'r' ) )
_snake_case = {int(__lowercase ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
return config
def a_ ( __lowercase : int ) -> str:
_snake_case = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def a_ ( __lowercase : str , __lowercase : Tuple , __lowercase : str ) -> Union[str, Any]:
_snake_case = dct.pop(__lowercase )
_snake_case = val
def a_ ( __lowercase : List[str] , __lowercase : str ) -> Dict:
_snake_case = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_snake_case = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_snake_case = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
_snake_case = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_snake_case = in_proj_weight[:dim, :]
_snake_case = in_proj_bias[: dim]
_snake_case = in_proj_weight[
dim : dim * 2, :
]
_snake_case = in_proj_bias[
dim : dim * 2
]
_snake_case = in_proj_weight[
-dim :, :
]
_snake_case = in_proj_bias[-dim :]
# fmt: on
def a_ ( __lowercase : Dict , __lowercase : Dict ) -> str:
# transformer decoder self-attention layers
_snake_case = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
_snake_case = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
_snake_case = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_snake_case = in_proj_weight[:hidden_size, :]
_snake_case = in_proj_bias[:hidden_size]
_snake_case = in_proj_weight[
hidden_size : hidden_size * 2, :
]
_snake_case = in_proj_bias[hidden_size : hidden_size * 2]
_snake_case = in_proj_weight[-hidden_size:, :]
_snake_case = in_proj_bias[-hidden_size:]
def a_ ( ) -> List[str]:
_snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_snake_case = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def a_ ( __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Tuple ) -> Optional[Any]:
_snake_case = get_deta_config(__lowercase )
# load original state dict
if model_name == "deta-swin-large":
_snake_case = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
_snake_case = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
_snake_case = torch.load(__lowercase , map_location='cpu' )['model']
# original state dict
for name, param in state_dict.items():
print(__lowercase , param.shape )
# rename keys
_snake_case = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
read_in_swin_q_k_v(__lowercase , config.backbone_config )
read_in_decoder_q_k_v(__lowercase , __lowercase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
if "input_proj" in key:
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
# finally, create HuggingFace model and load state dict
_snake_case = DetaForObjectDetection(__lowercase )
model.load_state_dict(__lowercase )
model.eval()
_snake_case = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(__lowercase )
# load image processor
_snake_case = DetaImageProcessor(format='coco_detection' )
# verify our conversion on image
_snake_case = prepare_img()
_snake_case = processor(images=__lowercase , return_tensors='pt' )
_snake_case = encoding['pixel_values']
_snake_case = model(pixel_values.to(__lowercase ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
_snake_case = torch.tensor(
[[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
_snake_case = torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
elif model_name == "deta-swin-large-o365":
_snake_case = torch.tensor(
[[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
_snake_case = torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__lowercase ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__lowercase ) , atol=1E-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
processor.save_pretrained(__lowercase )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(f'''jozhang97/{model_name}''' )
processor.push_to_hub(f'''jozhang97/{model_name}''' )
if __name__ == "__main__":
_lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_lowerCamelCase : List[Any] = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 686 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def a_ ( __lowercase : List[str] , __lowercase : int , __lowercase : str=None , __lowercase : Tuple=None ) -> List[Any]:
if attention_mask is None:
_snake_case = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = OPTConfig
_UpperCAmelCase : int = {}
_UpperCAmelCase : Optional[int] = "gelu"
def __init__( self : int , lowercase : List[str] , lowercase : Dict=13 , lowercase : int=7 , lowercase : int=True , lowercase : Union[str, Any]=False , lowercase : List[Any]=99 , lowercase : Tuple=16 , lowercase : Optional[int]=2 , lowercase : Tuple=4 , lowercase : Tuple=4 , lowercase : str="gelu" , lowercase : Optional[Any]=0.1 , lowercase : Optional[int]=0.1 , lowercase : Tuple=20 , lowercase : Optional[int]=2 , lowercase : Any=1 , lowercase : List[str]=0 , lowercase : List[Any]=16 , lowercase : Optional[int]=16 , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = eos_token_id
_snake_case = pad_token_id
_snake_case = bos_token_id
_snake_case = embed_dim
_snake_case = word_embed_proj_dim
_snake_case = False
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase , **self.config_updates , )
_snake_case = prepare_opt_inputs_dict(lowercase , lowercase )
return config, inputs_dict
def A ( self : Dict , lowercase : Optional[int] , lowercase : str ):
'''simple docstring'''
_snake_case = TFOPTModel(config=lowercase )
_snake_case = inputs_dict['input_ids']
_snake_case = input_ids[:1, :]
_snake_case = inputs_dict['attention_mask'][:1, :]
_snake_case = 1
# first forward pass
_snake_case = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
_snake_case , _snake_case = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_snake_case = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case = model(lowercase , attention_mask=lowercase )[0]
_snake_case = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case = output_from_no_past[:, -3:, random_slice_idx]
_snake_case = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
_UpperCAmelCase : Any = (TFOPTForCausalLM,) if is_tf_available() else ()
_UpperCAmelCase : str = (
{"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
)
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : str = False
_UpperCAmelCase : int = False
_UpperCAmelCase : Dict = 1_0
def A ( self : str ):
'''simple docstring'''
_snake_case = TFOPTModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase )
def A ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : int ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
def A ( self : int ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowercase : Dict , lowercase : int ):
if hasattr(lowercase , 'weight' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(lowercase , 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
_snake_case = model_class(config=lowercase )
_snake_case = _get_word_embedding_weight(lowercase , model.get_input_embeddings() )
_snake_case = _get_word_embedding_weight(lowercase , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowercase )
_snake_case = _get_word_embedding_weight(lowercase , model.get_input_embeddings() )
_snake_case = _get_word_embedding_weight(lowercase , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
_snake_case = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowercase )
# check that weights remain the same after resizing
_snake_case = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_snake_case = False
self.assertTrue(lowercase )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowercase )
_snake_case = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_snake_case = False
self.assertTrue(lowercase )
def a_ ( __lowercase : Optional[Any] ) -> str:
return tf.constant(__lowercase , dtype=tf.intaa )
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[str] = 9_9
def A ( self : str ):
'''simple docstring'''
_snake_case = tf.ones((4, 1) , dtype=tf.intaa ) * 2
_snake_case = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
_snake_case = input_ids.shape[0]
_snake_case = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = TFOPTModel.from_pretrained('facebook/opt-350m' )
_snake_case = _long_tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
_snake_case = tf.not_equal(lowercase , model.config.pad_token_id )
with tf.GradientTape():
_snake_case = model(input_ids=lowercase , attention_mask=lowercase ).last_hidden_state
_snake_case = (1, 11, 512)
self.assertEqual(output.shape , lowercase )
_snake_case = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-3 ) )
_snake_case = tf.function(lowercase , jit_compile=lowercase )
_snake_case = xla_generate(lowercase , lowercase )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-2 ) )
@require_tf
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
_snake_case = 'facebook/opt-350m'
def A ( self : Any ):
'''simple docstring'''
_snake_case = TFOPTForCausalLM.from_pretrained(self.path_model )
_snake_case = GPTaTokenizer.from_pretrained(self.path_model )
_snake_case = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
_snake_case = tokenizer(lowercase , return_tensors='tf' , padding=lowercase , add_special_tokens=lowercase )
_snake_case = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
_snake_case = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
] )
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) )
_snake_case = tf.function(lowercase , jit_compile=lowercase )
_snake_case = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) )
@require_tf
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@property
def A ( self : Optional[int] ):
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = 'facebook/opt-125m'
_snake_case = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
_snake_case = []
_snake_case = GPTaTokenizer.from_pretrained(lowercase )
_snake_case = TFOPTForCausalLM.from_pretrained(lowercase )
for prompt in self.prompts:
_snake_case = tokenizer(lowercase , return_tensors='tf' ).input_ids
_snake_case = model.generate(lowercase , max_length=10 )
_snake_case = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
predicted_outputs += generated_string
self.assertListEqual(lowercase , lowercase )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = 'facebook/opt-350m'
_snake_case = GPTaTokenizer.from_pretrained(lowercase )
_snake_case = TFOPTForCausalLM.from_pretrained(lowercase )
_snake_case = 'left'
# use different length sentences to test batching
_snake_case = [
'Hello, my dog is a little',
'Today, I',
]
_snake_case = tokenizer(lowercase , return_tensors='tf' , padding=lowercase )
_snake_case = inputs['input_ids']
_snake_case = model.generate(input_ids=lowercase , attention_mask=inputs['attention_mask'] )
_snake_case = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
_snake_case = model.generate(input_ids=lowercase )
_snake_case = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
_snake_case = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
_snake_case = model.generate(input_ids=lowercase , max_length=model.config.max_length - num_paddings )
_snake_case = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
_snake_case = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase )
_snake_case = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase )
_snake_case = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(lowercase , lowercase )
self.assertListEqual(lowercase , [non_padded_sentence, padded_sentence] )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = 'facebook/opt-350m'
_snake_case = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
_snake_case = []
_snake_case = GPTaTokenizer.from_pretrained(lowercase )
_snake_case = TFOPTForCausalLM.from_pretrained(lowercase )
for prompt in self.prompts:
_snake_case = tokenizer(lowercase , return_tensors='tf' ).input_ids
_snake_case = model.generate(lowercase , max_length=10 )
_snake_case = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
predicted_outputs += generated_string
self.assertListEqual(lowercase , lowercase ) | 686 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_lowerCamelCase : Dict = '''pt'''
elif is_tf_available():
_lowerCamelCase : List[str] = '''tf'''
else:
_lowerCamelCase : List[Any] = '''jax'''
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = PerceiverTokenizer
_UpperCAmelCase : Optional[int] = False
def A ( self : Tuple ):
'''simple docstring'''
super().setUp()
_snake_case = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A ( self : str ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def A ( self : Optional[int] , **lowercase : Dict ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
def A ( self : Optional[int] , lowercase : Tuple , lowercase : Optional[Any]=False , lowercase : int=20 , lowercase : Optional[int]=5 ):
'''simple docstring'''
_snake_case = []
for i in range(len(lowercase ) ):
try:
_snake_case = tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_snake_case = list(filter(lambda lowercase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , lowercase ) )
_snake_case = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowercase ) , lowercase ) )
if max_length is not None and len(lowercase ) > max_length:
_snake_case = toks[:max_length]
if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0:
while len(lowercase ) < min_length:
_snake_case = toks + toks
# toks_str = [t[1] for t in toks]
_snake_case = [t[0] for t in toks]
# Ensure consistency
_snake_case = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
if " " not in output_txt and len(lowercase ) > 1:
_snake_case = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase )
)
if with_prefix_space:
_snake_case = ' ' + output_txt
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
return output_txt, output_ids
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = 'Unicode €.'
_snake_case = tokenizer(lowercase )
_snake_case = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , lowercase )
# decoding
_snake_case = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '[CLS]Unicode €.[SEP]' )
_snake_case = tokenizer('e è é ê ë' )
_snake_case = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , lowercase )
# decoding
_snake_case = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_snake_case = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
_snake_case = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
self.assertIsInstance(lowercase , lowercase )
if FRAMEWORK != "jax":
_snake_case = list(batch.input_ids.numpy()[0] )
else:
_snake_case = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowercase , lowercase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_snake_case = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , lowercase )
self.assertIn('attention_mask' , lowercase )
self.assertNotIn('decoder_input_ids' , lowercase )
self.assertNotIn('decoder_attention_mask' , lowercase )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = [
'Summary of the text.',
'Another summary.',
]
_snake_case = tokenizer(
text_target=lowercase , max_length=32 , padding='max_length' , truncation=lowercase , return_tensors=lowercase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_snake_case = tokenizer.__class__.from_pretrained(lowercase )
_snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
shutil.rmtree(lowercase )
_snake_case = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_snake_case = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_snake_case = tokenizer.__class__.from_pretrained(lowercase )
_snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_snake_case = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowercase )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase )
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowercase )
_snake_case = [f'''<extra_id_{i}>''' for i in range(125 )]
_snake_case = added_tokens_extra_ids + [
'an_additional_special_token'
]
_snake_case = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case = tokenizer_class.from_pretrained(
lowercase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=lowercase )]
_snake_case = tokenizer_class.from_pretrained(
lowercase , additional_special_tokens=lowercase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def A ( self : Dict ):
'''simple docstring'''
pass
def A ( self : Optional[int] ):
'''simple docstring'''
pass
def A ( self : List[str] ):
'''simple docstring'''
pass
def A ( self : Dict ):
'''simple docstring'''
pass
def A ( self : int ):
'''simple docstring'''
_snake_case = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_snake_case = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
_snake_case = tokenizer.convert_tokens_to_string(lowercase )
self.assertIsInstance(lowercase , lowercase ) | 686 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def a_ ( __lowercase : Union[str, Any] , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : int , __lowercase : List[Any]=True , __lowercase : str="pt" ) -> Union[str, Any]:
_snake_case = {'add_prefix_space': True} if isinstance(__lowercase , __lowercase ) and not line.startswith(' ' ) else {}
_snake_case = padding_side
return tokenizer(
[line] , max_length=__lowercase , padding='max_length' if pad_to_max_length else None , truncation=__lowercase , return_tensors=__lowercase , add_special_tokens=__lowercase , **__lowercase , )
def a_ ( __lowercase : Optional[Any] , __lowercase : Optional[Any] , __lowercase : Any=None , ) -> List[Any]:
_snake_case = input_ids.ne(__lowercase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : Dict , lowercase : Tuple , lowercase : int , lowercase : Optional[int] , lowercase : List[Any]="train" , lowercase : Tuple=None , lowercase : Optional[Any]=None , lowercase : List[str]=None , lowercase : Dict="" , ):
'''simple docstring'''
super().__init__()
_snake_case = Path(lowercase ).joinpath(type_path + '.source' )
_snake_case = Path(lowercase ).joinpath(type_path + '.target' )
_snake_case = self.get_char_lens(self.src_file )
_snake_case = max_source_length
_snake_case = max_target_length
assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
_snake_case = tokenizer
_snake_case = prefix
if n_obs is not None:
_snake_case = self.src_lens[:n_obs]
_snake_case = src_lang
_snake_case = tgt_lang
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self : Optional[int] , lowercase : int ):
'''simple docstring'''
_snake_case = index + 1 # linecache starts at 1
_snake_case = self.prefix + linecache.getline(str(self.src_file ) , lowercase ).rstrip('\n' )
_snake_case = linecache.getline(str(self.tgt_file ) , lowercase ).rstrip('\n' )
assert source_line, f'''empty source line for index {index}'''
assert tgt_line, f'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer , lowercase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_snake_case = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , lowercase ) else self.tokenizer
)
_snake_case = self.tokenizer.generator if isinstance(self.tokenizer , lowercase ) else self.tokenizer
_snake_case = encode_line(lowercase , lowercase , self.max_source_length , 'right' )
_snake_case = encode_line(lowercase , lowercase , self.max_target_length , 'right' )
_snake_case = source_inputs['input_ids'].squeeze()
_snake_case = target_inputs['input_ids'].squeeze()
_snake_case = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def A ( lowercase : Union[str, Any] ):
'''simple docstring'''
return [len(lowercase ) for x in Path(lowercase ).open().readlines()]
def A ( self : Optional[Any] , lowercase : int ):
'''simple docstring'''
_snake_case = torch.stack([x['input_ids'] for x in batch] )
_snake_case = torch.stack([x['attention_mask'] for x in batch] )
_snake_case = torch.stack([x['decoder_input_ids'] for x in batch] )
_snake_case = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , lowercase )
else self.tokenizer.pad_token_id
)
_snake_case = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , lowercase )
else self.tokenizer.pad_token_id
)
_snake_case = trim_batch(lowercase , lowercase )
_snake_case , _snake_case = trim_batch(lowercase , lowercase , attention_mask=lowercase )
_snake_case = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
_lowerCamelCase : int = getLogger(__name__)
def a_ ( __lowercase : List[List] ) -> Dict:
return list(itertools.chain.from_iterable(__lowercase ) )
def a_ ( __lowercase : str ) -> None:
_snake_case = get_git_info()
save_json(__lowercase , os.path.join(__lowercase , 'git_log.json' ) )
def a_ ( __lowercase : Tuple , __lowercase : Any , __lowercase : Tuple=4 , **__lowercase : str ) -> str:
with open(__lowercase , 'w' ) as f:
json.dump(__lowercase , __lowercase , indent=__lowercase , **__lowercase )
def a_ ( __lowercase : Dict ) -> List[Any]:
with open(__lowercase ) as f:
return json.load(__lowercase )
def a_ ( ) -> Optional[Any]:
_snake_case = git.Repo(search_parent_directories=__lowercase )
_snake_case = {
'repo_id': str(__lowercase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def a_ ( __lowercase : Callable , __lowercase : Iterable ) -> List:
return list(map(__lowercase , __lowercase ) )
def a_ ( __lowercase : List[Any] , __lowercase : str ) -> Any:
with open(__lowercase , 'wb' ) as f:
return pickle.dump(__lowercase , __lowercase )
def a_ ( __lowercase : Dict ) -> Any:
def remove_articles(__lowercase : Union[str, Any] ):
return re.sub(r'\b(a|an|the)\b' , ' ' , __lowercase )
def white_space_fix(__lowercase : Union[str, Any] ):
return " ".join(text.split() )
def remove_punc(__lowercase : List[Any] ):
_snake_case = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowercase : List[str] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowercase ) ) ) )
def a_ ( __lowercase : List[Any] , __lowercase : List[Any] ) -> Optional[Any]:
_snake_case = normalize_answer(__lowercase ).split()
_snake_case = normalize_answer(__lowercase ).split()
_snake_case = Counter(__lowercase ) & Counter(__lowercase )
_snake_case = sum(common.values() )
if num_same == 0:
return 0
_snake_case = 1.0 * num_same / len(__lowercase )
_snake_case = 1.0 * num_same / len(__lowercase )
_snake_case = (2 * precision * recall) / (precision + recall)
return fa
def a_ ( __lowercase : List[Any] , __lowercase : Dict ) -> int:
return normalize_answer(__lowercase ) == normalize_answer(__lowercase )
def a_ ( __lowercase : List[str] , __lowercase : List[str] ) -> Dict:
assert len(__lowercase ) == len(__lowercase )
_snake_case = 0
for hypo, pred in zip(__lowercase , __lowercase ):
em += exact_match_score(__lowercase , __lowercase )
if len(__lowercase ) > 0:
em /= len(__lowercase )
return {"em": em}
def a_ ( __lowercase : Tuple ) -> Optional[int]:
return model_prefix.startswith('rag' )
def a_ ( __lowercase : Dict , __lowercase : Optional[int] , __lowercase : List[Any] ) -> int:
_snake_case = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_snake_case = 'dropout_rate'
for p in extra_params:
if getattr(__lowercase , __lowercase , __lowercase ):
if not hasattr(__lowercase , __lowercase ) and not hasattr(__lowercase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(__lowercase ) )
delattr(__lowercase , __lowercase )
continue
_snake_case = p if hasattr(__lowercase , __lowercase ) else equivalent_param[p]
setattr(__lowercase , __lowercase , getattr(__lowercase , __lowercase ) )
delattr(__lowercase , __lowercase )
return hparams, config | 686 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def a_ ( ) -> Optional[int]:
_snake_case , _snake_case = 9, 14 # noqa: F841
_snake_case = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_snake_case = defaultdict(__lowercase )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
_snake_case = mst(__lowercase )
_snake_case = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_snake_case = tuple(answer[:2] )
_snake_case = tuple(edge[::-1] )
assert edge in result or reverse in result | 686 | 1 |
import random
from .binary_exp_mod import bin_exp_mod
def a_ ( __lowercase : int , __lowercase : Any=1_000 ) -> int:
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
_snake_case = n - 1
_snake_case = 0
while d % 2 == 0:
d /= 2
exp += 1
# n - 1=d*(2**exp)
_snake_case = 0
while count < prec:
_snake_case = random.randint(2 , n - 1 )
_snake_case = bin_exp_mod(__lowercase , __lowercase , __lowercase )
if b != 1:
_snake_case = True
for _ in range(__lowercase ):
if b == n - 1:
_snake_case = False
break
_snake_case = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
_lowerCamelCase : Tuple = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i))) | 686 |
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Tuple = ["transformers", "torch", "note_seq"]
def __init__( self : List[Any] , *lowercase : List[Any] , **lowercase : Dict ):
'''simple docstring'''
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : List[str] , **lowercase : Any ):
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : List[str] , **lowercase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] ) | 686 | 1 |
from __future__ import annotations
def a_ ( __lowercase : list[int] ) -> bool:
return len(set(__lowercase ) ) == len(__lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def a_ ( ) -> Optional[Any]:
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(__lowercase ):
requests.request('GET' , 'https://huggingface.co' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )
@pytest.mark.integration
def a_ ( ) -> Optional[int]:
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET' , 'https://huggingface.co' )
def a_ ( ) -> Dict:
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(__lowercase ):
http_head('https://huggingface.co' ) | 686 | 1 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
_lowerCamelCase : Any = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Any , lowercase : str = None , lowercase : uuid.UUID = None , lowercase : Union[str, Any]=None , lowercase : Any=None ):
'''simple docstring'''
if not conversation_id:
_snake_case = uuid.uuida()
if past_user_inputs is None:
_snake_case = []
if generated_responses is None:
_snake_case = []
_snake_case = conversation_id
_snake_case = past_user_inputs
_snake_case = generated_responses
_snake_case = text
def __eq__( self : List[str] , lowercase : int ):
'''simple docstring'''
if not isinstance(lowercase , lowercase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def A ( self : Tuple , lowercase : str , lowercase : bool = False ):
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
f'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
f'''with: "{text}".''' )
_snake_case = text
else:
logger.warning(
f'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
f'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
else:
_snake_case = text
def A ( self : Tuple ):
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
_snake_case = None
def A ( self : List[Any] , lowercase : str ):
'''simple docstring'''
self.generated_responses.append(lowercase )
def A ( self : int ):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : List[Any] ):
'''simple docstring'''
_snake_case = f'''Conversation id: {self.uuid} \n'''
for is_user, text in self.iter_texts():
_snake_case = 'user' if is_user else 'bot'
output += f'''{name} >> {text} \n'''
return output
@add_end_docstrings(
UpperCAmelCase ,r"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : int , *lowercase : List[Any] , **lowercase : Any ):
'''simple docstring'''
super().__init__(*lowercase , **lowercase )
if self.tokenizer.pad_token_id is None:
_snake_case = self.tokenizer.eos_token
def A ( self : Optional[Any] , lowercase : str=None , lowercase : Union[str, Any]=None , lowercase : List[Any]=None , **lowercase : List[str] ):
'''simple docstring'''
_snake_case = {}
_snake_case = {}
_snake_case = {}
if min_length_for_response is not None:
_snake_case = min_length_for_response
if minimum_tokens is not None:
_snake_case = minimum_tokens
if "max_length" in generate_kwargs:
_snake_case = generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
_snake_case = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(lowercase )
return preprocess_params, forward_params, postprocess_params
def __call__( self : Dict , lowercase : Union[Conversation, List[Conversation]] , lowercase : List[str]=0 , **lowercase : List[Any] ):
'''simple docstring'''
_snake_case = super().__call__(lowercase , num_workers=lowercase , **lowercase )
if isinstance(lowercase , lowercase ) and len(lowercase ) == 1:
return outputs[0]
return outputs
def A ( self : List[str] , lowercase : Conversation , lowercase : int=32 ):
'''simple docstring'''
if not isinstance(lowercase , lowercase ):
raise ValueError('ConversationalPipeline, expects Conversation as inputs' )
if conversation.new_user_input is None:
raise ValueError(
f'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '''
'Add user inputs with the conversation\'s `add_user_input` method' )
if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
_snake_case = self.tokenizer._build_conversation_input_ids(lowercase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
_snake_case = self._legacy_parse_and_tokenize(lowercase )
if self.framework == "pt":
_snake_case = torch.LongTensor([input_ids] )
elif self.framework == "tf":
_snake_case = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def A ( self : List[str] , lowercase : str , lowercase : Optional[int]=10 , **lowercase : Tuple ):
'''simple docstring'''
_snake_case = generate_kwargs.get('max_length' , self.model.config.max_length )
_snake_case = model_inputs['input_ids'].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
_snake_case = max_length - minimum_tokens
_snake_case = model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
_snake_case = model_inputs['attention_mask'][:, -trim:]
_snake_case = model_inputs.pop('conversation' )
_snake_case = max_length
_snake_case = self.model.generate(**lowercase , **lowercase )
if self.model.config.is_encoder_decoder:
_snake_case = 1
else:
_snake_case = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def A ( self : Any , lowercase : List[str] , lowercase : Tuple=True ):
'''simple docstring'''
_snake_case = model_outputs['output_ids']
_snake_case = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase , )
_snake_case = model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(lowercase )
return conversation
def A ( self : Dict , lowercase : Conversation ):
'''simple docstring'''
_snake_case = self.tokenizer.eos_token_id
_snake_case = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(lowercase , add_special_tokens=lowercase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(lowercase , add_special_tokens=lowercase ) )
if len(lowercase ) > self.tokenizer.model_max_length:
_snake_case = input_ids[-self.tokenizer.model_max_length :]
return input_ids | 686 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCamelCase : Optional[int] = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_lowerCamelCase : List[str] = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against one or more human-produced reference summaries or translations.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_lowerCamelCase : Dict = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
    references: list of references, one for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def A ( self : Union[str, Any] , lowercase : Tuple , lowercase : Optional[Any] , lowercase : int=None , lowercase : str=True , lowercase : List[str]=False ):
'''simple docstring'''
if rouge_types is None:
_snake_case = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
_snake_case = rouge_scorer.RougeScorer(rouge_types=lowercase , use_stemmer=lowercase )
if use_aggregator:
_snake_case = scoring.BootstrapAggregator()
else:
_snake_case = []
for ref, pred in zip(lowercase , lowercase ):
_snake_case = scorer.score(lowercase , lowercase )
if use_aggregator:
aggregator.add_scores(lowercase )
else:
scores.append(lowercase )
if use_aggregator:
_snake_case = aggregator.aggregate()
else:
_snake_case = {}
for key in scores[0]:
_snake_case = [score[key] for score in scores]
return result | 686 | 1 |
import math
def a_ ( __lowercase : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
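    # (every integer is 6k + i for some i in 0..5; 6k, 6k + 2, 6k + 3 and 6k + 4
    # are divisible by 2 or 3, so only 6k + 1 and 6k + 5 can be prime)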
for i in range(5 , int(math.sqrt(__lowercase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a_ ( __lowercase : float = 0.1 ) -> int:
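    # Counts primes on the diagonals of an Ulam-style number spiral (cf. Project Euler 58):
    # `j` is the current side length, the inner range visits the three non-square corners
    # of each new layer, and the loop returns the first side length whose diagonal
    # prime ratio drops below `ratio`.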
_snake_case = 3
_snake_case = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(__lowercase )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Dict = "swin2sr"
_UpperCAmelCase : Optional[int] = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Optional[int] , lowercase : List[Any]=64 , lowercase : int=1 , lowercase : Union[str, Any]=3 , lowercase : Dict=180 , lowercase : List[Any]=[6, 6, 6, 6, 6, 6] , lowercase : Dict=[6, 6, 6, 6, 6, 6] , lowercase : List[Any]=8 , lowercase : List[str]=2.0 , lowercase : Tuple=True , lowercase : Union[str, Any]=0.0 , lowercase : Dict=0.0 , lowercase : Optional[int]=0.1 , lowercase : int="gelu" , lowercase : List[str]=False , lowercase : List[Any]=0.02 , lowercase : List[Any]=1E-5 , lowercase : Optional[int]=2 , lowercase : Tuple=1.0 , lowercase : List[Any]="1conv" , lowercase : List[Any]="pixelshuffle" , **lowercase : List[str] , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = embed_dim
_snake_case = depths
_snake_case = len(lowercase )
_snake_case = num_heads
_snake_case = window_size
_snake_case = mlp_ratio
_snake_case = qkv_bias
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = drop_path_rate
_snake_case = hidden_act
_snake_case = use_absolute_embeddings
_snake_case = layer_norm_eps
_snake_case = initializer_range
_snake_case = upscale
_snake_case = img_range
_snake_case = resi_connection
_snake_case = upsampler | 686 | 1 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : Any = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCamelCase : Any = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
_lowerCamelCase : Union[str, Any] = {
'''allenai/led-base-16384''': 16_384,
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : int = VOCAB_FILES_NAMES
_UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : int = LEDTokenizer
_UpperCAmelCase : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self : str , lowercase : Union[str, Any]=None , lowercase : Tuple=None , lowercase : int=None , lowercase : Optional[int]="replace" , lowercase : Any="<s>" , lowercase : Optional[Any]="</s>" , lowercase : str="</s>" , lowercase : Optional[int]="<s>" , lowercase : Any="<unk>" , lowercase : Tuple="<pad>" , lowercase : str="<mask>" , lowercase : List[str]=False , lowercase : int=True , **lowercase : str , ):
'''simple docstring'''
super().__init__(
lowercase , lowercase , tokenizer_file=lowercase , errors=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase , **lowercase , )
_snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowercase ) != add_prefix_space:
_snake_case = getattr(lowercase , pre_tok_state.pop('type' ) )
_snake_case = add_prefix_space
_snake_case = pre_tok_class(**lowercase )
_snake_case = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_snake_case = 'post_processor'
_snake_case = getattr(self.backend_tokenizer , lowercase , lowercase )
if tokenizer_component_instance:
_snake_case = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_snake_case = tuple(state['sep'] )
if "cls" in state:
_snake_case = tuple(state['cls'] )
_snake_case = False
if state.get('add_prefix_space' , lowercase ) != add_prefix_space:
_snake_case = add_prefix_space
_snake_case = True
if state.get('trim_offsets' , lowercase ) != trim_offsets:
_snake_case = trim_offsets
_snake_case = True
if changes_to_apply:
_snake_case = getattr(lowercase , state.pop('type' ) )
_snake_case = component_class(**lowercase )
setattr(self.backend_tokenizer , lowercase , lowercase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def A ( self : int ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def A ( self : Any , lowercase : List[Any] ):
'''simple docstring'''
_snake_case = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else value
_snake_case = value
def A ( self : List[Any] , *lowercase : Optional[Any] , **lowercase : List[str] ):
'''simple docstring'''
_snake_case = kwargs.get('is_split_into_words' , lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*lowercase , **lowercase )
def A ( self : Union[str, Any] , *lowercase : str , **lowercase : Optional[Any] ):
'''simple docstring'''
_snake_case = kwargs.get('is_split_into_words' , lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._encode_plus(*lowercase , **lowercase )
def A ( self : Any , lowercase : str , lowercase : Optional[str] = None ):
'''simple docstring'''
_snake_case = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
def A ( self : str , lowercase : Dict , lowercase : int=None ):
'''simple docstring'''
_snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A ( self : int , lowercase : List[int] , lowercase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A ( self : Optional[int] , lowercase : Union[Dict[str, EncodedInput], BatchEncoding] , lowercase : Optional[int] = None , lowercase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowercase : Optional[int] = None , lowercase : Optional[bool] = None , ):
'''simple docstring'''
_snake_case = super()._pad(
encoded_inputs=lowercase , max_length=lowercase , padding_strategy=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , )
# Load from model defaults
if return_attention_mask is None:
_snake_case = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_snake_case = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_snake_case = len(encoded_inputs['global_attention_mask'] ) != len(lowercase )
if needs_to_be_padded:
_snake_case = len(lowercase ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_snake_case = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
_snake_case = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs | 686 |
import random
def a_ ( __lowercase : str , __lowercase : Any , __lowercase : Any ) -> Optional[Any]:
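    # Lomuto-style partition: a[left_index] is the pivot; smaller values are swapped
    # to the front and the pivot is moved to its final position, whose index is returned.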
_snake_case = a[left_index]
_snake_case = left_index + 1
for j in range(left_index + 1 , __lowercase ):
if a[j] < pivot:
_snake_case , _snake_case = a[i], a[j]
i += 1
_snake_case , _snake_case = a[i - 1], a[left_index]
return i - 1
def a_ ( __lowercase : Union[str, Any] , __lowercase : str , __lowercase : Optional[int] ) -> Tuple:
if left < right:
_snake_case = random.randint(__lowercase , right - 1 )
_snake_case , _snake_case = (
a[left],
a[pivot],
        ) # switches the pivot with the leftmost bound
_snake_case = partition(__lowercase , __lowercase , __lowercase )
quick_sort_random(
__lowercase , __lowercase , __lowercase ) # recursive quicksort to the left of the pivot point
quick_sort_random(
__lowercase , pivot_index + 1 , __lowercase ) # recursive quicksort to the right of the pivot point
def a_ ( ) -> None:
_snake_case = input('Enter numbers separated by a comma:\n' ).strip()
    _snake_case = [int(item ) for item in user_input.split(',' )]
quick_sort_random(__lowercase , 0 , len(__lowercase ) )
print(__lowercase )
if __name__ == "__main__":
main() | 686 | 1 |
def a_ ( __lowercase : list[int] , __lowercase : list[int] , __lowercase : int ) -> bool:
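    # True when no neighbour (adjacency value 1) of this vertex already has `color`.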
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(__lowercase ) )
def a_ ( __lowercase : list[list[int]] , __lowercase : int , __lowercase : list[int] , __lowercase : int ) -> bool:
# Base Case
if index == len(__lowercase ):
return True
# Recursive Step
for i in range(__lowercase ):
if valid_coloring(graph[index] , __lowercase , __lowercase ):
# Color current vertex
_snake_case = i
# Validate coloring
if util_color(__lowercase , __lowercase , __lowercase , index + 1 ):
return True
# Backtrack
_snake_case = -1
return False
def a_ ( __lowercase : list[list[int]] , __lowercase : int ) -> list[int]:
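    # Backtracking graph coloring: returns a vertex -> color assignment using at most
    # `max_colors` colors, or an empty list when the graph cannot be colored.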
_snake_case = [-1] * len(__lowercase )
if util_color(__lowercase , __lowercase , __lowercase , 0 ):
return colored_vertices
return [] | 686 |
import math
def a_ ( __lowercase : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
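    # (every integer is 6k + i for some i in 0..5; 6k, 6k + 2, 6k + 3 and 6k + 4
    # are divisible by 2 or 3, so only 6k + 1 and 6k + 5 can be prime)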
for i in range(5 , int(math.sqrt(__lowercase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a_ ( __lowercase : float = 0.1 ) -> int:
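    # Counts primes on the diagonals of an Ulam-style number spiral (cf. Project Euler 58):
    # `j` is the current side length, the inner range visits the three non-square corners
    # of each new layer, and the loop returns the first side length whose diagonal
    # prime ratio drops below `ratio`.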
_snake_case = 3
_snake_case = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(__lowercase )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 | 1 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def a_ ( __lowercase : Any , __lowercase : Optional[Any]=False ) -> str:
try:
_snake_case = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_snake_case = default
else:
# KEY is set, convert it to True or False.
try:
_snake_case = strtobool(__lowercase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''' )
return _value
_lowerCamelCase : Tuple = parse_flag_from_env('''RUN_SLOW''', default=False)
def a_ ( __lowercase : Union[str, Any] ) -> Union[str, Any]:
return unittest.skip('Test was skipped' )(__lowercase )
def a_ ( __lowercase : Any ) -> Tuple:
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(__lowercase )
def a_ ( __lowercase : int ) -> Dict:
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(__lowercase )
def a_ ( __lowercase : Union[str, Any] ) -> Any:
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(__lowercase )
def a_ ( __lowercase : Union[str, Any] ) -> Tuple:
    return unittest.skipUnless(is_xpu_available() , 'test requires an XPU' )(__lowercase )
def a_ ( __lowercase : Optional[Any] ) -> Dict:
    return unittest.skipUnless(is_mps_available() , 'test requires `mps` backend support in `torch`' )(__lowercase )
def a_ ( __lowercase : Dict ) -> Optional[Any]:
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(__lowercase )
def a_ ( __lowercase : List[Any] ) -> List[Any]:
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(__lowercase )
def a_ ( __lowercase : List[str] ) -> Tuple:
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(__lowercase )
def a_ ( __lowercase : Optional[Any] ) -> List[str]:
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(__lowercase )
def a_ ( __lowercase : List[str] ) -> Dict:
    return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires an XPU' )(__lowercase )
def a_ ( __lowercase : List[Any] ) -> Optional[int]:
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(__lowercase )
def a_ ( __lowercase : str ) -> str:
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(__lowercase )
def a_ ( __lowercase : Optional[Any] ) -> List[Any]:
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(__lowercase )
def a_ ( __lowercase : Optional[Any] ) -> List[str]:
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(__lowercase )
def a_ ( __lowercase : Optional[int] ) -> int:
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(__lowercase )
def a_ ( __lowercase : Dict=None , __lowercase : List[str]=None ) -> List[Any]:
if test_case is None:
return partial(__lowercase , version=__lowercase )
return unittest.skipUnless(is_torch_version('>=' , __lowercase ) , f'''test requires torch version >= {version}''' )(__lowercase )
def a_ ( __lowercase : Union[str, Any] ) -> List[str]:
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(__lowercase )
def a_ ( __lowercase : Tuple ) -> Any:
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(__lowercase )
def a_ ( __lowercase : int ) -> List[Any]:
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(__lowercase )
_lowerCamelCase : Dict = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def a_ ( __lowercase : Any ) -> Tuple:
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(__lowercase )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Any = True
@classmethod
def A ( cls : int ):
'''simple docstring'''
_snake_case = tempfile.mkdtemp()
@classmethod
def A ( cls : int ):
'''simple docstring'''
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def A ( self : Optional[Any] ):
'''simple docstring'''
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('**/*' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(lowercase )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[int] ):
'''simple docstring'''
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[int] , lowercase : Union[mock.Mock, List[mock.Mock]] ):
'''simple docstring'''
_snake_case = mocks if isinstance(lowercase , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def a_ ( __lowercase : Tuple ) -> int:
_snake_case = AcceleratorState()
_snake_case = tensor[None].clone().to(state.device )
_snake_case = gather(__lowercase ).cpu()
_snake_case = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __lowercase ):
return False
return True
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : str , lowercase : Optional[Any] , lowercase : List[str] , lowercase : Any ):
'''simple docstring'''
_snake_case = returncode
_snake_case = stdout
_snake_case = stderr
async def a_ ( __lowercase : Any , __lowercase : int ) -> List[str]:
while True:
_snake_case = await stream.readline()
if line:
callback(__lowercase )
else:
break
async def a_ ( __lowercase : Optional[Any] , __lowercase : Union[str, Any]=None , __lowercase : Optional[Any]=None , __lowercase : int=None , __lowercase : Union[str, Any]=False , __lowercase : Dict=False ) -> _RunOutput:
if echo:
print('\nRunning: ' , ' '.join(__lowercase ) )
_snake_case = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__lowercase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__lowercase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_snake_case = []
_snake_case = []
def tee(__lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Optional[Any] , __lowercase : List[Any]="" ):
_snake_case = line.decode('utf-8' ).rstrip()
sink.append(__lowercase )
if not quiet:
print(__lowercase , __lowercase , file=__lowercase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __lowercase : tee(__lowercase , __lowercase , sys.stdout , label='stdout:' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __lowercase : tee(__lowercase , __lowercase , sys.stderr , label='stderr:' ) ) ),
] , timeout=__lowercase , )
return _RunOutput(await p.wait() , __lowercase , __lowercase )
def a_ ( __lowercase : Tuple , __lowercase : Tuple=None , __lowercase : List[str]=None , __lowercase : Dict=180 , __lowercase : str=False , __lowercase : Optional[int]=True ) -> _RunOutput:
_snake_case = asyncio.get_event_loop()
_snake_case = loop.run_until_complete(
_stream_subprocess(__lowercase , env=__lowercase , stdin=__lowercase , timeout=__lowercase , quiet=__lowercase , echo=__lowercase ) )
_snake_case = ' '.join(__lowercase )
if result.returncode > 0:
_snake_case = '\n'.join(result.stderr )
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''' )
return result
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
pass
def a_ ( __lowercase : List[str] , __lowercase : int=False ) -> List[str]:
try:
_snake_case = subprocess.check_output(__lowercase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__lowercase , 'decode' ):
_snake_case = output.decode('utf-8' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'''Command `{' '.join(__lowercase )}` failed with the following error:\n\n{e.output.decode()}''' ) from e | 686 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = "resnet"
_UpperCAmelCase : Any = ["basic", "bottleneck"]
def __init__( self : Union[str, Any] , lowercase : Dict=3 , lowercase : Any=64 , lowercase : Any=[256, 512, 1_024, 2_048] , lowercase : Dict=[3, 4, 6, 3] , lowercase : Any="bottleneck" , lowercase : Optional[Any]="relu" , lowercase : Dict=False , lowercase : str=None , lowercase : Tuple=None , **lowercase : List[Any] , ):
'''simple docstring'''
super().__init__(**lowercase )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
_snake_case = num_channels
_snake_case = embedding_size
_snake_case = hidden_sizes
_snake_case = depths
_snake_case = layer_type
_snake_case = hidden_act
_snake_case = downsample_in_first_stage
_snake_case = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(lowercase ) + 1 )]
_snake_case , _snake_case = get_aligned_output_features_output_indices(
out_features=lowercase , out_indices=lowercase , stage_names=self.stage_names )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = version.parse("1.11" )
@property
def A ( self : int ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def A ( self : Optional[Any] ):
'''simple docstring'''
return 1E-3 | 686 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_lowerCamelCase : Tuple = logging.get_logger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : List[str] , lowercase : int=False , lowercase : Optional[int]=False , lowercase : Any=6.0 , lowercase : List[str]=None , lowercase : Optional[Any]=False , lowercase : Union[str, Any]=False , lowercase : Optional[int]=None , lowercase : List[str]="fp4" , lowercase : int=False , **lowercase : Optional[Any] , ):
'''simple docstring'''
_snake_case = load_in_abit
_snake_case = load_in_abit
_snake_case = llm_inta_threshold
_snake_case = llm_inta_skip_modules
_snake_case = llm_inta_enable_fpaa_cpu_offload
_snake_case = llm_inta_has_fpaa_weight
_snake_case = bnb_abit_quant_type
_snake_case = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
_snake_case = torch.floataa
elif isinstance(lowercase , lowercase ):
_snake_case = getattr(lowercase , lowercase )
elif isinstance(lowercase , torch.dtype ):
_snake_case = bnb_abit_compute_dtype
else:
raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
self.post_init()
def A ( self : Optional[int] ):
'''simple docstring'''
if not isinstance(self.llm_inta_threshold , lowercase ):
raise ValueError('llm_int8_threshold must be a float' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , lowercase ):
raise ValueError('llm_int8_skip_modules must be a list of strings' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , lowercase ):
raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
if not isinstance(self.llm_inta_has_fpaa_weight , lowercase ):
raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
if not isinstance(self.bnb_abit_quant_type , lowercase ):
raise ValueError('bnb_4bit_quant_type must be a string' )
if not isinstance(self.bnb_abit_use_double_quant , lowercase ):
raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
'0.39.0' ):
raise ValueError(
'4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
def A ( self : List[Any] ):
'''simple docstring'''
return self.load_in_abit or self.load_in_abit
def A ( self : str ):
'''simple docstring'''
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def A ( cls : Any , lowercase : Union[str, Any] , lowercase : Optional[int] , **lowercase : Optional[Any] ):
'''simple docstring'''
_snake_case = cls(**lowercase )
_snake_case = []
for key, value in kwargs.items():
if hasattr(lowercase , lowercase ):
setattr(lowercase , lowercase , lowercase )
to_remove.append(lowercase )
for key in to_remove:
kwargs.pop(lowercase , lowercase )
if return_unused_kwargs:
return config, kwargs
else:
return config
def A ( self : Union[str, Any] , lowercase : Union[str, os.PathLike] ):
'''simple docstring'''
with open(lowercase , 'w' , encoding='utf-8' ) as writer:
_snake_case = self.to_dict()
_snake_case = json.dumps(lowercase , indent=2 , sort_keys=lowercase ) + '\n'
writer.write(lowercase )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = copy.deepcopy(self.__dict__ )
_snake_case = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
return output
def __repr__( self : Any ):
'''simple docstring'''
return f'''{self.__class__.__name__} {self.to_json_string()}'''
def A ( self : Union[str, Any] , lowercase : bool = True ):
'''simple docstring'''
if use_diff is True:
_snake_case = self.to_diff_dict()
else:
_snake_case = self.to_dict()
return json.dumps(lowercase , indent=2 , sort_keys=lowercase ) + "\n"
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = self.to_dict()
# get the default config dict
_snake_case = BitsAndBytesConfig().to_dict()
_snake_case = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
_snake_case = value
return serializable_config_dict | 686 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : List[Any] , lowercase : Union[str, Any] , lowercase : int ):
'''simple docstring'''
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(lowercase ) for s in shape] )}.npy'''
def A ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
def A ( self : List[Any] , lowercase : Tuple=0 , lowercase : Optional[int]=(4, 4, 64, 64) , lowercase : Optional[int]=False ):
'''simple docstring'''
_snake_case = jnp.bfloataa if fpaa else jnp.floataa
_snake_case = jnp.array(load_hf_numpy(self.get_file_format(lowercase , lowercase ) ) , dtype=lowercase )
return image
def A ( self : Tuple , lowercase : Any=False , lowercase : Union[str, Any]="CompVis/stable-diffusion-v1-4" ):
'''simple docstring'''
_snake_case = jnp.bfloataa if fpaa else jnp.floataa
_snake_case = 'bf16' if fpaa else None
_snake_case , _snake_case = FlaxUNetaDConditionModel.from_pretrained(
lowercase , subfolder='unet' , dtype=lowercase , revision=lowercase )
return model, params
def A ( self : Union[str, Any] , lowercase : str=0 , lowercase : Optional[Any]=(4, 77, 768) , lowercase : int=False ):
'''simple docstring'''
_snake_case = jnp.bfloataa if fpaa else jnp.floataa
_snake_case = jnp.array(load_hf_numpy(self.get_file_format(lowercase , lowercase ) ) , dtype=lowercase )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def A ( self : Tuple , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : List[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=lowercase )
_snake_case = self.get_latents(lowercase , fpaa=lowercase )
_snake_case = self.get_encoder_hidden_states(lowercase , fpaa=lowercase )
_snake_case = model.apply(
{'params': params} , lowercase , jnp.array(lowercase , dtype=jnp.intaa ) , encoder_hidden_states=lowercase , ).sample
assert sample.shape == latents.shape
_snake_case = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_snake_case = jnp.array(lowercase , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowercase , lowercase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def A ( self : str , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : List[str] ):
'''simple docstring'''
_snake_case , _snake_case = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=lowercase )
_snake_case = self.get_latents(lowercase , shape=(4, 4, 96, 96) , fpaa=lowercase )
_snake_case = self.get_encoder_hidden_states(lowercase , shape=(4, 77, 1_024) , fpaa=lowercase )
_snake_case = model.apply(
{'params': params} , lowercase , jnp.array(lowercase , dtype=jnp.intaa ) , encoder_hidden_states=lowercase , ).sample
assert sample.shape == latents.shape
_snake_case = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_snake_case = jnp.array(lowercase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowercase , lowercase , atol=1E-2 ) | 686 | 1 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
_lowerCamelCase : str = logging.get_logger('''transformers.models.encodec''')
_lowerCamelCase : List[Any] = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
_lowerCamelCase : int = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
_lowerCamelCase : Any = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
_lowerCamelCase : Dict = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
_lowerCamelCase : List[Any] = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
_lowerCamelCase : List[Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
_lowerCamelCase : List[Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
_lowerCamelCase : Optional[int] = []
_lowerCamelCase : Any = []
def a_ ( __lowercase : int , __lowercase : Any , __lowercase : Optional[Any] , __lowercase : List[Any] , __lowercase : Any ) -> str:
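    # Walks the dotted attribute path in `key`, checks that the checkpoint value has
    # the expected shape, then copies it into the matching parameter/buffer slot.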
for attribute in key.split('.' ):
_snake_case = getattr(__lowercase , __lowercase )
if weight_type is not None:
_snake_case = getattr(__lowercase , __lowercase ).shape
else:
_snake_case = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
_snake_case = value
elif weight_type == "weight_g":
_snake_case = value
elif weight_type == "weight_v":
_snake_case = value
elif weight_type == "bias":
_snake_case = value
elif weight_type == "running_mean":
_snake_case = value
elif weight_type == "running_var":
_snake_case = value
elif weight_type == "num_batches_tracked":
_snake_case = value
elif weight_type == "weight_ih_l0":
_snake_case = value
elif weight_type == "weight_hh_l0":
_snake_case = value
elif weight_type == "bias_ih_l0":
_snake_case = value
elif weight_type == "bias_hh_l0":
_snake_case = value
elif weight_type == "weight_ih_l1":
_snake_case = value
elif weight_type == "weight_hh_l1":
_snake_case = value
elif weight_type == "bias_ih_l1":
_snake_case = value
elif weight_type == "bias_hh_l1":
_snake_case = value
else:
_snake_case = value
logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def a_ ( __lowercase : List[str] , __lowercase : Tuple ) -> Dict:
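    # Matches exact substrings as well as `prefix.*` and `prefix.*.suffix` wildcard patterns.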
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_snake_case , _snake_case = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def a_ ( __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Optional[Any] ) -> Optional[int]:
_snake_case = []
if model_name == "encodec_24khz" or "encodec_32khz":
_snake_case = MAPPING_24K
elif model_name == "encodec_48khz":
_snake_case = MAPPING_48K
else:
raise ValueError(f'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(__lowercase , __lowercase ):
logger.info(f'''{name} was ignored''' )
continue
_snake_case = False
for key, mapped_key in MAPPING.items():
if "*" in key:
_snake_case , _snake_case = key.split('.*.' )
if prefix in name and suffix in name:
_snake_case = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
_snake_case = True
if "*" in mapped_key:
_snake_case = name.split(__lowercase )[0].split('.' )[-2]
_snake_case = mapped_key.replace('*' , __lowercase )
if "weight_g" in name:
_snake_case = 'weight_g'
elif "weight_v" in name:
_snake_case = 'weight_v'
elif "weight_ih_l0" in name:
_snake_case = 'weight_ih_l0'
elif "weight_hh_l0" in name:
_snake_case = 'weight_hh_l0'
elif "bias_ih_l0" in name:
_snake_case = 'bias_ih_l0'
elif "bias_hh_l0" in name:
_snake_case = 'bias_hh_l0'
elif "weight_ih_l1" in name:
_snake_case = 'weight_ih_l1'
elif "weight_hh_l1" in name:
_snake_case = 'weight_hh_l1'
elif "bias_ih_l1" in name:
_snake_case = 'bias_ih_l1'
elif "bias_hh_l1" in name:
_snake_case = 'bias_hh_l1'
elif "bias" in name:
_snake_case = 'bias'
elif "weight" in name:
_snake_case = 'weight'
elif "running_mean" in name:
_snake_case = 'running_mean'
elif "running_var" in name:
_snake_case = 'running_var'
elif "num_batches_tracked" in name:
_snake_case = 'num_batches_tracked'
else:
_snake_case = None
set_recursively(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
continue
if not is_used:
unused_weights.append(__lowercase )
logger.warning(f'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def a_ ( __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : List[str] , __lowercase : List[str]=None , __lowercase : Dict=None , ) -> Dict:
if config_path is not None:
_snake_case = EncodecConfig.from_pretrained(__lowercase )
else:
_snake_case = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
_snake_case = [8, 5, 4, 4]
_snake_case = [2.2]
_snake_case = 64
_snake_case = 32_000
_snake_case = 2_048
_snake_case = False
_snake_case = False
_snake_case = False
elif model_name == "encodec_48khz":
_snake_case = [8, 5, 4, 2]
_snake_case = [3.0, 6.0, 1_2.0, 2_4.0]
_snake_case = 48_000
_snake_case = 2
_snake_case = False
_snake_case = 'time_group_norm'
_snake_case = True
_snake_case = 1.0
_snake_case = 0.0_1
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
_snake_case = EncodecModel(__lowercase )
_snake_case = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(__lowercase )
_snake_case = torch.load(__lowercase )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
_snake_case = original_checkpoint['best_state']
recursively_load_weights(__lowercase , __lowercase , __lowercase )
model.save_pretrained(__lowercase )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(__lowercase )
model.push_to_hub(__lowercase )
if __name__ == "__main__":
_lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
_lowerCamelCase : Optional[Any] = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
) | 686 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def a_ ( __lowercase : Any ) -> List[Any]:
_snake_case = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def a_ ( __lowercase : Dict ) -> Tuple:
_snake_case , _snake_case = emb.weight.shape
_snake_case = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
_snake_case = emb.weight.data
return lin_layer
def a_ ( __lowercase : Optional[int] , __lowercase : Union[str, Any]=None ) -> Tuple:
_snake_case = {}
for old_key in state_dict.keys():
_snake_case = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
_snake_case = key.replace('moe_layer.experts.0' , f'''ffn.experts.expert_{expert_idx}''' )
else:
_snake_case = key.replace('moe_layer.experts.' , 'ffn.experts.expert_' )
if "gate" in key:
_snake_case = key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier' )
if "fc2" and "experts" not in key:
_snake_case = key.replace('.fc2.' , '.ffn.fc2.' )
if "fc1" and "experts" not in key:
_snake_case = key.replace('.fc1.' , '.ffn.fc1.' )
if ".encoder_attn." in key:
_snake_case = key.replace('.encoder_attn.' , '.cross_attention.' )
if "encoder_attn_layer_norm" in key:
_snake_case = key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm' )
if "final_layer_norm" in key:
_snake_case = key.replace('final_layer_norm' , 'ff_layer_norm' )
_snake_case = state_dict[old_key]
return new_dict
def a_ ( __lowercase : Optional[Any] , __lowercase : Tuple , __lowercase : Any , __lowercase : List[str] , __lowercase : str = WEIGHTS_NAME ) -> Union[str, Any]:
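    # Writes each expert's fairseq checkpoint into its own HF shard file (renaming
    # keys on the fly), then appends the shared weights and builds the weight-map index.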
_snake_case = []
_snake_case = 0
os.makedirs(__lowercase , exist_ok=__lowercase )
for expert in range(__lowercase ):
_snake_case = switch_checkpoint_path + f'''-rank-{expert}.pt'''
if os.path.isfile(__lowercase ):
_snake_case = torch.load(__lowercase )['model']
remove_ignore_keys_(__lowercase )
_snake_case = rename_fairseq_keys(__lowercase , __lowercase )
_snake_case = os.path.join(
__lowercase , weights_name.replace('.bin' , f'''-{len(__lowercase )+1:05d}-of-???.bin''' ) )
torch.save(__lowercase , __lowercase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__lowercase )[0]].dtype )
# Add the last block
_snake_case = os.path.join(__lowercase , weights_name.replace('.bin' , f'''-{len(__lowercase )+1:05d}-of-???.bin''' ) )
_snake_case = torch.load(switch_checkpoint_path + '-shared.pt' )['model']
remove_ignore_keys_(__lowercase )
_snake_case = rename_fairseq_keys(__lowercase , __lowercase )
_snake_case = shared_weights['decoder.embed_tokens.weight']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__lowercase ) == 1:
_snake_case = os.path.join(__lowercase , __lowercase )
torch.save(__lowercase , __lowercase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__lowercase , __lowercase )
# Otherwise, let's build the index
_snake_case = {}
for idx, shard in enumerate(__lowercase ):
_snake_case = weights_name.replace('.bin' , f'''-{idx+1:05d}-of-{len(__lowercase ):05d}.bin''' )
_snake_case = os.path.join(__lowercase , weights_name.replace('.bin' , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__lowercase , os.path.join(__lowercase , __lowercase ) )
for key in shard:
_snake_case = shard_file
# Add the metadata
_snake_case = {'total_size': total_size}
_snake_case = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(__lowercase , __lowercase ) , 'w' , encoding='utf-8' ) as f:
_snake_case = json.dumps(__lowercase , indent=2 , sort_keys=__lowercase ) + '\n'
f.write(__lowercase )
return metadata, index
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
_lowerCamelCase : List[str] = parser.parse_args()
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
_lowerCamelCase : Tuple = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
_lowerCamelCase : Dict = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path) | 686 | 1 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[Any] , lowercase : int , lowercase : int ):
'''simple docstring'''
_snake_case = jnp.ones((batch_size, length) ) / length
return scores
def A ( self : Dict ):
'''simple docstring'''
_snake_case = None
_snake_case = 20
_snake_case = self._get_uniform_logits(batch_size=2 , length=lowercase )
# tweak scores to not be uniform anymore
_snake_case = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_snake_case = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_snake_case = jax.nn.softmax(lowercase , axis=-1 )
_snake_case = FlaxTemperatureLogitsWarper(temperature=0.5 )
_snake_case = FlaxTemperatureLogitsWarper(temperature=1.3 )
_snake_case = jax.nn.softmax(temp_dist_warper_sharper(lowercase , scores.copy() , cur_len=lowercase ) , axis=-1 )
_snake_case = jax.nn.softmax(temp_dist_warper_smoother(lowercase , scores.copy() , cur_len=lowercase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = None
_snake_case = 10
_snake_case = 2
# create ramp distribution
_snake_case = np.broadcast_to(np.arange(lowercase )[None, :] , (batch_size, vocab_size) ).copy()
_snake_case = ramp_logits[1:, : vocab_size // 2] + vocab_size
_snake_case = FlaxTopKLogitsWarper(3 )
_snake_case = top_k_warp(lowercase , lowercase , cur_len=lowercase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_snake_case = 5
_snake_case = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_snake_case = np.broadcast_to(np.arange(lowercase )[None, :] , (batch_size, length) ).copy()
_snake_case = top_k_warp_safety_check(lowercase , lowercase , cur_len=lowercase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = None
_snake_case = 10
_snake_case = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_snake_case = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
_snake_case = FlaxTopPLogitsWarper(0.8 )
_snake_case = np.exp(top_p_warp(lowercase , lowercase , cur_len=lowercase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_snake_case = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-3 ) )
# check edge cases with negative and extreme logits
_snake_case = np.broadcast_to(np.arange(lowercase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_snake_case = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
_snake_case = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_snake_case = top_p_warp(lowercase , lowercase , cur_len=lowercase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = 20
_snake_case = 4
_snake_case = 0
_snake_case = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowercase )
# check that min length is applied at length 5
_snake_case = ids_tensor((batch_size, 20) , vocab_size=20 )
_snake_case = 5
_snake_case = self._get_uniform_logits(lowercase , lowercase )
_snake_case = min_dist_processor(lowercase , lowercase , cur_len=lowercase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('inf' )] )
# check that min length is not applied anymore at length 15
_snake_case = self._get_uniform_logits(lowercase , lowercase )
_snake_case = 15
_snake_case = min_dist_processor(lowercase , lowercase , cur_len=lowercase )
self.assertFalse(jnp.isinf(lowercase ).any() )
def A ( self : Any ):
'''simple docstring'''
_snake_case = 20
_snake_case = 4
_snake_case = 0
_snake_case = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowercase )
# check that all scores are -inf except the bos_token_id score
_snake_case = ids_tensor((batch_size, 1) , vocab_size=20 )
_snake_case = 1
_snake_case = self._get_uniform_logits(lowercase , lowercase )
_snake_case = logits_processor(lowercase , lowercase , cur_len=lowercase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
_snake_case = 3
_snake_case = self._get_uniform_logits(lowercase , lowercase )
_snake_case = logits_processor(lowercase , lowercase , cur_len=lowercase )
self.assertFalse(jnp.isinf(lowercase ).any() )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = 20
_snake_case = 4
_snake_case = 0
_snake_case = 5
_snake_case = FlaxForcedEOSTokenLogitsProcessor(max_length=lowercase , eos_token_id=lowercase )
# check that all scores are -inf except the eos_token_id when max_length is reached
_snake_case = ids_tensor((batch_size, 4) , vocab_size=20 )
_snake_case = 4
_snake_case = self._get_uniform_logits(lowercase , lowercase )
_snake_case = logits_processor(lowercase , lowercase , cur_len=lowercase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_snake_case = 3
_snake_case = self._get_uniform_logits(lowercase , lowercase )
_snake_case = logits_processor(lowercase , lowercase , cur_len=lowercase )
self.assertFalse(jnp.isinf(lowercase ).any() )
def A ( self : str ):
'''simple docstring'''
_snake_case = 4
_snake_case = 10
_snake_case = 15
_snake_case = 2
_snake_case = 1
_snake_case = 15
# dummy input_ids and scores
_snake_case = ids_tensor((batch_size, sequence_length) , lowercase )
_snake_case = input_ids.copy()
_snake_case = self._get_uniform_logits(lowercase , lowercase )
_snake_case = scores.copy()
# instantiate all dist processors
_snake_case = FlaxTemperatureLogitsWarper(temperature=0.5 )
_snake_case = FlaxTopKLogitsWarper(3 )
_snake_case = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_snake_case = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowercase )
_snake_case = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowercase )
_snake_case = FlaxForcedEOSTokenLogitsProcessor(max_length=lowercase , eos_token_id=lowercase )
_snake_case = 10
# no processor list
_snake_case = temp_dist_warp(lowercase , lowercase , cur_len=lowercase )
_snake_case = top_k_warp(lowercase , lowercase , cur_len=lowercase )
_snake_case = top_p_warp(lowercase , lowercase , cur_len=lowercase )
_snake_case = min_dist_proc(lowercase , lowercase , cur_len=lowercase )
_snake_case = bos_dist_proc(lowercase , lowercase , cur_len=lowercase )
_snake_case = eos_dist_proc(lowercase , lowercase , cur_len=lowercase )
# with processor list
_snake_case = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_snake_case = processor(lowercase , lowercase , cur_len=lowercase )
# scores should be equal
self.assertTrue(jnp.allclose(lowercase , lowercase , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def A ( self : Any ):
'''simple docstring'''
_snake_case = 4
_snake_case = 10
_snake_case = 15
_snake_case = 2
_snake_case = 1
_snake_case = 15
# dummy input_ids and scores
_snake_case = ids_tensor((batch_size, sequence_length) , lowercase )
_snake_case = input_ids.copy()
_snake_case = self._get_uniform_logits(lowercase , lowercase )
_snake_case = scores.copy()
# instantiate all dist processors
_snake_case = FlaxTemperatureLogitsWarper(temperature=0.5 )
_snake_case = FlaxTopKLogitsWarper(3 )
_snake_case = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_snake_case = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowercase )
_snake_case = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowercase )
_snake_case = FlaxForcedEOSTokenLogitsProcessor(max_length=lowercase , eos_token_id=lowercase )
_snake_case = 10
# no processor list
def run_no_processor_list(lowercase : Any , lowercase : Optional[int] , lowercase : List[Any] ):
_snake_case = temp_dist_warp(lowercase , lowercase , cur_len=lowercase )
_snake_case = top_k_warp(lowercase , lowercase , cur_len=lowercase )
_snake_case = top_p_warp(lowercase , lowercase , cur_len=lowercase )
_snake_case = min_dist_proc(lowercase , lowercase , cur_len=lowercase )
_snake_case = bos_dist_proc(lowercase , lowercase , cur_len=lowercase )
_snake_case = eos_dist_proc(lowercase , lowercase , cur_len=lowercase )
return scores
# with processor list
def run_processor_list(lowercase : List[str] , lowercase : List[str] , lowercase : List[str] ):
_snake_case = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_snake_case = processor(lowercase , lowercase , cur_len=lowercase )
return scores
_snake_case = jax.jit(lowercase )
_snake_case = jax.jit(lowercase )
_snake_case = jitted_run_no_processor_list(lowercase , lowercase , lowercase )
_snake_case = jitted_run_processor_list(lowercase , lowercase , lowercase )
# scores should be equal
self.assertTrue(jnp.allclose(lowercase , lowercase , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() ) | 686 |
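# Illustrative sketch (not from the test file) of composing the same warpers outside
# a test harness; assumes `jax` and `transformers` are installed and uses toy values.
import jax.numpy as jnp
from transformers.generation import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
)
_toy_ids = jnp.array([[0]])
_toy_scores = jnp.log(jnp.array([[0.5, 0.3, 0.1, 0.1]]))
_chain = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(2)])
_warped = _chain(_toy_ids, _toy_scores, cur_len=1)  # top-2 logits survive, the rest become -inf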
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_lowerCamelCase : List[Any] = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_lowerCamelCase : Any = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_lowerCamelCase : Union[str, Any] = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds , labels ):
    return float((preds == labels).mean() )
def acc_and_f1(preds , labels ):
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def pearson_and_spearman(preds , labels ):
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
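# A quick sanity sketch for the helpers above (illustrative only; numpy is imported
# here just for the example and the expected values were worked out by hand):
if __name__ == "__main__":
    import numpy as np
    _demo_preds = np.array([0, 1, 1, 0])
    _demo_labels = np.array([0, 1, 0, 0])
    print(simple_accuracy(_demo_preds, _demo_labels))  # 0.75
    print(acc_and_f1(_demo_preds, _demo_labels))  # {'accuracy': 0.75, 'f1': 0.666...}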
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute( self , predictions , references ):
        '''simple docstring'''
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions , references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions , references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
import os
def solution() -> int:
    with open(os.path.dirname(__file__ ) + '/grid.txt' ) as f:
        l = []  # noqa: E741
        for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
        maximum = 0
        # right
        for i in range(20 ):
            for j in range(17 ):
                temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
                if temp > maximum:
                    maximum = temp
        # down
        for i in range(17 ):
            for j in range(20 ):
                temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
                if temp > maximum:
                    maximum = temp
        # diagonal 1
        for i in range(17 ):
            for j in range(17 ):
                temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
                if temp > maximum:
                    maximum = temp
        # diagonal 2
        for i in range(17 ):
            for j in range(3 , 20 ):
                temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
                if temp > maximum:
                    maximum = temp
        return maximum
if __name__ == "__main__":
print(solution()) | 686 |
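# Illustrative check of the same scan logic on a hand-made 4x4 grid (not part of the
# Project Euler solution); here the largest horizontal product is the bottom row.
_demo_grid = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
    [9, 10, 11, 12],
    [13, 14, 15, 16],
]
assert _demo_grid[3][0] * _demo_grid[3][1] * _demo_grid[3][2] * _demo_grid[3][3] == 43_680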
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
_lowerCamelCase : Dict = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : int = "sequence-classification"
def __init__( self : Optional[int] , lowercase : Any ):
'''simple docstring'''
if type(lowercase ) == dict:
_snake_case = Namespace(**lowercase )
_snake_case = glue_output_modes[hparams.task]
_snake_case = glue_tasks_num_labels[hparams.task]
super().__init__(lowercase , lowercase , self.mode )
def A ( self : Optional[Any] , **lowercase : Optional[Any] ):
'''simple docstring'''
return self.model(**lowercase )
def A ( self : Optional[Any] , lowercase : str , lowercase : Tuple ):
'''simple docstring'''
_snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_snake_case = self(**lowercase )
_snake_case = outputs[0]
_snake_case = self.trainer.lr_schedulers[0]['scheduler']
_snake_case = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.hparams
_snake_case = processors[args.task]()
_snake_case = processor.get_labels()
for mode in ["train", "dev"]:
_snake_case = self._feature_file(lowercase )
if os.path.exists(lowercase ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , lowercase )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_snake_case = (
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_snake_case = convert_examples_to_features(
lowercase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , lowercase )
torch.save(lowercase , lowercase )
def A ( self : Dict , lowercase : str , lowercase : int , lowercase : bool = False ):
'''simple docstring'''
_snake_case = 'dev' if mode == 'test' else mode
_snake_case = self._feature_file(lowercase )
logger.info('Loading features from cached file %s' , lowercase )
_snake_case = torch.load(lowercase )
_snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_snake_case = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_snake_case = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(lowercase , lowercase , lowercase , lowercase ) , batch_size=lowercase , shuffle=lowercase , )
def A ( self : str , lowercase : Optional[Any] , lowercase : str ):
'''simple docstring'''
_snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_snake_case = self(**lowercase )
_snake_case , _snake_case = outputs[:2]
_snake_case = logits.detach().cpu().numpy()
_snake_case = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : int , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_snake_case = np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_snake_case = np.argmax(lowercase , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_snake_case = np.squeeze(lowercase )
_snake_case = np.concatenate([x['target'] for x in outputs] , axis=0 )
_snake_case = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , lowercase , lowercase )}
_snake_case = dict(results.items() )
_snake_case = results
return ret, preds_list, out_label_list
def A ( self : int , lowercase : list ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case = self._eval_end(lowercase )
_snake_case = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : List[str] , lowercase : Any ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case = self._eval_end(lowercase )
_snake_case = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
    def add_model_specific_args( parser , root_dir ):
        '''simple docstring'''
        BaseTransformer.add_model_specific_args(parser , root_dir )
        parser.add_argument(
            '--max_seq_length' , default=128 , type=int , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--task' , default='' , type=str , required=True , help='The GLUE task to run' , )
        parser.add_argument(
            '--gpus' , default=0 , type=int , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
        parser.add_argument(
            '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd() )
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            './results' , f'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
        os.makedirs(args.output_dir )
    model = GLUETransformer(args )
    trainer = generic_train(model , args )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=True ) )
        model = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(model )
if __name__ == "__main__":
main() | 686 | 1 |
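# Example invocation (illustrative only; flag names follow the parsers set up above
# and by `lightning_base`, and all paths/model names are placeholders):
#
#   python run_pl_glue.py \
#       --task mrpc \
#       --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased \
#       --output_dir ./results \
#       --do_predict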
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Any = LxmertTokenizer
_UpperCAmelCase : Union[str, Any] = LxmertTokenizerFast
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : Tuple = True
def A ( self : Any ):
'''simple docstring'''
super().setUp()
_snake_case = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def A ( self : List[Any] , lowercase : Union[str, Any] ):
'''simple docstring'''
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
return input_text, output_text
def A ( self : Any ):
'''simple docstring'''
_snake_case = self.tokenizer_class(self.vocab_file )
_snake_case = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(lowercase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [7, 4, 5, 10, 8, 9] )
def A ( self : Optional[int] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_snake_case = self.get_tokenizer()
_snake_case = self.get_rust_tokenizer()
_snake_case = 'I was born in 92000, and this is falsé.'
_snake_case = tokenizer.tokenize(lowercase )
_snake_case = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
_snake_case = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
_snake_case = self.get_rust_tokenizer()
_snake_case = tokenizer.encode(lowercase )
_snake_case = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase ) | 686 |
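# A minimal sketch of the greedy-longest-match WordPiece behaviour the tests above
# rely on, reusing the same toy vocab (illustrative only):
from transformers.models.bert.tokenization_bert import WordpieceTokenizer
_demo_vocab = {'[UNK]': 0, 'want': 1, '##want': 2, '##ed': 3, 'un': 4, 'runn': 5, '##ing': 6}
_demo_wp = WordpieceTokenizer(vocab=_demo_vocab, unk_token='[UNK]')
assert _demo_wp.tokenize('unwanted running') == ['un', '##want', '##ed', 'runn', '##ing']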
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = LEDConfig
_UpperCAmelCase : int = {}
_UpperCAmelCase : List[str] = "gelu"
def __init__( self : Union[str, Any] , lowercase : Optional[int] , lowercase : Dict=13 , lowercase : Dict=7 , lowercase : Tuple=True , lowercase : Dict=False , lowercase : Dict=99 , lowercase : Any=32 , lowercase : List[Any]=2 , lowercase : List[str]=4 , lowercase : List[str]=37 , lowercase : Dict=0.1 , lowercase : int=0.1 , lowercase : List[Any]=20 , lowercase : int=2 , lowercase : Optional[Any]=1 , lowercase : List[str]=0 , lowercase : Optional[int]=4 , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = eos_token_id
_snake_case = pad_token_id
_snake_case = bos_token_id
_snake_case = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_snake_case = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_snake_case = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_snake_case = prepare_led_inputs_dict(lowercase , lowercase , lowercase )
_snake_case = tf.concat(
[tf.zeros_like(lowercase )[:, :-1], tf.ones_like(lowercase )[:, -1:]] , axis=-1 , )
_snake_case = global_attention_mask
return config, inputs_dict
def A ( self : str , lowercase : str , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = TFLEDModel(config=lowercase ).get_decoder()
_snake_case = inputs_dict['input_ids']
_snake_case = input_ids[:1, :]
_snake_case = inputs_dict['attention_mask'][:1, :]
_snake_case = 1
# first forward pass
_snake_case = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
_snake_case , _snake_case = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _snake_case = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
_snake_case = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case = model(lowercase , attention_mask=lowercase )[0]
_snake_case = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case = output_from_no_past[:, -3:, random_slice_idx]
_snake_case = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
def a_ ( __lowercase : List[Any] , __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[str]=None , __lowercase : List[str]=None , __lowercase : List[str]=None , __lowercase : str=None , ) -> Union[str, Any]:
if attention_mask is None:
        _snake_case = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
_snake_case = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
_snake_case = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_snake_case = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
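# Illustrative sketch (not part of the test file): LED marks globally-attending
# positions with 1s in `global_attention_mask`; mirroring the tester above, only the
# last token is global here. Assumes TensorFlow is installed.
_demo_ids = tf.zeros((1, 6), dtype=tf.int32)
_demo_global = tf.concat(
    [tf.zeros_like(_demo_ids)[:, :-1], tf.ones_like(_demo_ids)[:, -1:]], axis=-1
)
# _demo_global -> [[0, 0, 0, 0, 0, 1]]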
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_UpperCAmelCase : Optional[int] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_UpperCAmelCase : Tuple = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_UpperCAmelCase : str = True
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : str = False
_UpperCAmelCase : List[Any] = False
def A ( self : Any ):
'''simple docstring'''
_snake_case = TFLEDModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase )
def A ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = tf.zeros_like(inputs_dict['attention_mask'] )
_snake_case = 2
_snake_case = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
_snake_case = True
_snake_case = self.model_tester.seq_length
_snake_case = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase : List[str] ):
_snake_case = outputs.decoder_attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase : List[str] ):
_snake_case = [t.numpy() for t in outputs.encoder_attentions]
_snake_case = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = False
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
_snake_case = len(lowercase )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
if self.is_encoder_decoder:
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_decoder_attentions_output(lowercase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase ) )
self.assertEqual(model.config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def A ( self : List[Any] ):
'''simple docstring'''
pass
def A ( self : Any ):
'''simple docstring'''
pass
def _long_tensor(__lowercase ):
    return tf.constant(__lowercase , dtype=tf.int64 )
_lowerCamelCase : List[Any] = 1E-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
_snake_case = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , lowercase , lowercase )
_snake_case = model(**lowercase )[0]
_snake_case = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 )
def A ( self : str ):
'''simple docstring'''
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
_snake_case = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , lowercase , lowercase )
_snake_case = model(**lowercase )[0]
_snake_case = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 , rtol=1E-3 ) | 686 | 1 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
_lowerCamelCase : List[Any] = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
_snake_case = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
_snake_case = torch.manual_seed(0 )
_snake_case = pipe(
image=lowercase , generator=lowercase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
_snake_case = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_snake_case = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 686 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
_lowerCamelCase : Union[str, Any] = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
ZERO2 = '''zero2'''
ZERO3 = '''zero3'''
stages = [ZERO2, ZERO3]
def custom_name_func(func , param_num , param ):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('_'.join(str(x ) for x in param.args ) )
    return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
    @parameterized.expand(params , name_func=custom_name_func )
def A ( self : List[str] , lowercase : List[Any] , lowercase : Dict ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
def A ( self : Any , lowercase : str , lowercase : List[str] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
    @parameterized.expand(params , name_func=custom_name_func )
def A ( self : List[str] , lowercase : Optional[Any] , lowercase : Optional[int] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
def A ( self : Optional[int] , lowercase : Union[str, Any] , lowercase : Union[str, Any] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
def A ( self : List[str] , lowercase : Optional[Any] ):
'''simple docstring'''
pass
def A ( self : str , lowercase : str , lowercase : str , lowercase : int = 10 , lowercase : bool = True , lowercase : bool = True , lowercase : bool = True , ):
'''simple docstring'''
_snake_case = models[model]
_snake_case = self.run_trainer(
stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , )
self.do_checks(lowercase )
return output_dir
def A ( self : Any , lowercase : str , lowercase : str , lowercase : int = 10 , lowercase : int = 1 , lowercase : bool = True , lowercase : bool = True , ):
'''simple docstring'''
_snake_case = self.get_auto_remove_tmp_dir('./xxx' , after=lowercase )
_snake_case = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(lowercase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_snake_case = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
_snake_case = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
_snake_case = self.get_launcher(lowercase )
_snake_case = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase , env=self.get_env() )
return output_dir
def A ( self : List[str] , lowercase : Any=False ):
'''simple docstring'''
_snake_case = min(2 , get_gpu_count() ) if distributed else 1
return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split() | 686 | 1 |
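# Illustrative example of the command the helpers above assemble (values are
# placeholders, not taken from a real run):
#
#   deepspeed --num_nodes 1 --num_gpus 2 run_asr.py \
#       --deepspeed ds_config_wav2vec2_zero2.json \
#       --model_name_or_path patrickvonplaten/wav2vec2_tiny_random ...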
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Dict = BloomTokenizerFast
_UpperCAmelCase : Optional[Any] = BloomTokenizerFast
_UpperCAmelCase : int = True
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : List[Any] = "tokenizer_file"
_UpperCAmelCase : Tuple = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def A ( self : List[Any] ):
'''simple docstring'''
super().setUp()
_snake_case = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
tokenizer.save_pretrained(self.tmpdirname )
def A ( self : Optional[int] , **lowercase : Optional[int] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **lowercase )
def A ( self : Dict ):
'''simple docstring'''
_snake_case = self.get_rust_tokenizer()
_snake_case = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
_snake_case = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]
_snake_case = tokenizer.batch_encode_plus(lowercase )['input_ids']
self.assertListEqual(lowercase , lowercase )
_snake_case = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def A ( self : Optional[int] , lowercase : Optional[int]=6 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
_snake_case = 'This is a simple input'
_snake_case = ['This is a simple input 1', 'This is a simple input 2']
_snake_case = ('This is a simple input', 'This is a pair')
_snake_case = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(lowercase , max_length=lowercase )
tokenizer_r.encode_plus(lowercase , max_length=lowercase )
tokenizer_r.batch_encode_plus(lowercase , max_length=lowercase )
tokenizer_r.encode(lowercase , max_length=lowercase )
tokenizer_r.batch_encode_plus(lowercase , max_length=lowercase )
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding' )
_snake_case = None # Hotfixing padding = None
self.assertRaises(lowercase , tokenizer_r.encode , lowercase , max_length=lowercase , padding='max_length' )
# Simple input
self.assertRaises(lowercase , tokenizer_r.encode_plus , lowercase , max_length=lowercase , padding='max_length' )
# Simple input
self.assertRaises(
lowercase , tokenizer_r.batch_encode_plus , lowercase , max_length=lowercase , padding='max_length' , )
# Pair input
self.assertRaises(lowercase , tokenizer_r.encode , lowercase , max_length=lowercase , padding='max_length' )
# Pair input
self.assertRaises(lowercase , tokenizer_r.encode_plus , lowercase , max_length=lowercase , padding='max_length' )
# Pair input
self.assertRaises(
lowercase , tokenizer_r.batch_encode_plus , lowercase , max_length=lowercase , padding='max_length' , )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = self.get_rust_tokenizer()
_snake_case = load_dataset('xnli' , 'all_languages' , split='test' , streaming=lowercase )
_snake_case = next(iter(lowercase ) )['premise'] # pick up one data
_snake_case = list(sample_data.values() )
_snake_case = list(map(tokenizer.encode , lowercase ) )
_snake_case = [tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase ) for x in output_tokens]
self.assertListEqual(lowercase , lowercase )
def A ( self : str ):
'''simple docstring'''
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 ) | 686 |
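# Usage sketch for the padding behaviour exercised above (illustrative; assumes the
# Bloom tokenizer files are available):
#
#   tok = BloomTokenizerFast.from_pretrained('bigscience/tokenizer')
#   enc = tok('This is a simple input', padding='max_length', max_length=16)
#   tok.pad_token = None  # clearing the pad token makes padded calls raise ValueError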
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_lowerCamelCase : int = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
_lowerCamelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 | 1 |
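# Minimal sketch (illustrative, heavily simplified) of the lazy-import pattern that
# `_LazyModule` implements above: attribute access triggers the real import so that
# importing the package itself stays cheap.
import importlib
import types
class _TinyLazyModule(types.ModuleType):
    """Toy stand-in for transformers' _LazyModule; the real class does much more."""
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
    def __getattr__(self, attr):
        for module_name, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(module_name), attr)
        raise AttributeError(attr)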
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Any , lowercase : NestedDataStructureLike[PathLike] , lowercase : Optional[NamedSplit] = None , lowercase : Optional[Features] = None , lowercase : str = None , lowercase : bool = False , lowercase : bool = False , lowercase : Optional[str] = None , lowercase : Optional[int] = None , **lowercase : Tuple , ):
'''simple docstring'''
super().__init__(
lowercase , split=lowercase , features=lowercase , cache_dir=lowercase , keep_in_memory=lowercase , streaming=lowercase , num_proc=lowercase , **lowercase , )
_snake_case = field
_snake_case = path_or_paths if isinstance(lowercase , lowercase ) else {self.split: path_or_paths}
_snake_case = Json(
cache_dir=lowercase , data_files=lowercase , features=lowercase , field=lowercase , **lowercase , )
def A ( self : Tuple ):
'''simple docstring'''
if self.streaming:
_snake_case = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_snake_case = None
_snake_case = None
_snake_case = None
_snake_case = None
self.builder.download_and_prepare(
download_config=lowercase , download_mode=lowercase , verification_mode=lowercase , base_path=lowercase , num_proc=self.num_proc , )
_snake_case = self.builder.as_dataset(
split=self.split , verification_mode=lowercase , in_memory=self.keep_in_memory )
return dataset
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : int , lowercase : Dataset , lowercase : Union[PathLike, BinaryIO] , lowercase : Optional[int] = None , lowercase : Optional[int] = None , **lowercase : Any , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
_snake_case = dataset
_snake_case = path_or_buf
_snake_case = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_snake_case = num_proc
_snake_case = 'utf-8'
_snake_case = to_json_kwargs
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = self.to_json_kwargs.pop('path_or_buf' , lowercase )
_snake_case = self.to_json_kwargs.pop('orient' , 'records' )
_snake_case = self.to_json_kwargs.pop('lines' , True if orient == 'records' else False )
_snake_case = self.to_json_kwargs.pop('index' , False if orient in ['split', 'table'] else True )
_snake_case = self.to_json_kwargs.pop('compression' , lowercase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , 'wb' , compression=lowercase ) as buffer:
_snake_case = self._write(file_obj=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
' was passed. Please provide a local path instead.' )
_snake_case = self._write(
file_obj=self.path_or_buf , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs )
return written
def A ( self : Tuple , lowercase : Dict ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = args
_snake_case = query_table(
table=self.dataset.data , key=slice(lowercase , offset + self.batch_size ) , indices=self.dataset._indices , )
_snake_case = batch.to_pandas().to_json(
path_or_buf=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **lowercase )
if not json_str.endswith('\n' ):
json_str += "\n"
return json_str.encode(self.encoding )
def A ( self : Tuple , lowercase : BinaryIO , lowercase : Dict , lowercase : Dict , lowercase : str , **lowercase : Dict , ):
'''simple docstring'''
_snake_case = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
_snake_case = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(lowercase )
else:
_snake_case , _snake_case = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowercase , lowercase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
written += file_obj.write(lowercase )
return written | 686 |
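# Usage sketch for the writer above via the public API it backs (illustrative; the
# output path is a placeholder):
from datasets import Dataset
_demo_ds = Dataset.from_dict({'tokens': [[1, 2], [3, 4]]})
_demo_ds.to_json('demo_output.jsonl', lines=True)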
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n , prec=1_000 ):
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d*(2**exp)
    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        b = bin_exp_mod(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
_lowerCamelCase : Tuple = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i))) | 686 | 1 |
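# Worked example of the n - 1 = d * 2**exp decomposition the loop above computes
# (illustrative): for n = 221, n - 1 = 220 = 55 * 2**2, so d = 55 and exp = 2.
assert (221 - 1) == 55 * 2**2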
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 0 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r'''\s+''')
def get_hash(example ):
    return {"hash": hashlib.md5(re.sub(PATTERN , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def line_stats(example ):
    line_lengths = [len(line ) for line in example['content'].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats(example ):
    alpha_frac = np.mean([c.isalnum() for c in example['content']] )
    return {"alpha_frac": alpha_frac}
def check_uniques(example , uniques ):
    if example["hash"] in uniques:
        uniques.remove(example['hash'] )
        return True
    else:
        return False
def is_autogenerated(example , scan_width=5 ):
    keywords = ['auto-generated', 'autogenerated', 'automatically generated']
    lines = example['content'].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example , scan_width=5 , coeff=0.05 ):
    keywords = ['unit tests', 'test file', 'configuration file']
    lines = example['content'].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example['content'].count('\n' )
    threshold = int(coeff * nlines )
    for line in lines:
        count_config += line.lower().count('config' )
        count_test += line.lower().count('test' )
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example ):
    keywords = ['def ', 'class ', 'for ', 'while ']
    lines = example['content'].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example , minimum=4 ):
    lines = example['content'].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count('=' )
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def a_ ( __lowercase : Dict ) -> Dict:
_snake_case = tokenizer(example['content'] , truncation=__lowercase )['input_ids']
_snake_case = len(example['content'] ) / len(__lowercase )
return {"ratio": ratio}
def a_ ( __lowercase : Optional[Any] ) -> Any:
_snake_case = {}
results.update(get_hash(__lowercase ) )
results.update(line_stats(__lowercase ) )
results.update(alpha_stats(__lowercase ) )
results.update(char_token_ratio(__lowercase ) )
results.update(is_autogenerated(__lowercase ) )
results.update(is_config_or_test(__lowercase ) )
results.update(has_no_keywords(__lowercase ) )
results.update(has_few_assignments(__lowercase ) )
return results
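# Worked example (illustrative addition). It assumes the original helper names
# used at the call sites in `preprocess` above (`line_stats`, `alpha_stats`);
# the tokenizer-dependent fields are omitted because the tokenizer is only
# created further down the script. Defined but not called by the pipeline.
def _stats_demo():
    example = {'content': 'x = 1\nlong_name = 2\n'}
    print(line_stats(example))   # {'line_mean': 9.0, 'line_max': 13}
    print(alpha_stats(example))  # {'alpha_frac': 0.55}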
def a_ ( __lowercase : Optional[int] , __lowercase : str , __lowercase : List[Any] ) -> int:
if not check_uniques(__lowercase , __lowercase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def a_ ( __lowercase : Dict ) -> Dict:
with open(__lowercase , 'rb' ) as f_in:
with gzip.open(str(__lowercase ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(__lowercase , __lowercase )
os.unlink(__lowercase )
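# In-memory sanity sketch (illustrative addition): checks that gzip
# round-trips the bytes `compress_file` writes, using only stdlib calls and
# no disk I/O. Defined but not called by the pipeline.
def _gzip_roundtrip_demo():
    import io
    payload = b'{"content": "print(1)"}\n'
    buf = io.BytesIO()
    with gzip.GzipFile(fileobj=buf, mode='wb', compresslevel=6) as gz:
        gz.write(payload)
    assert gzip.decompress(buf.getvalue()) == payload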
# Settings
_lowerCamelCase : Dict = HfArgumentParser(PreprocessingArguments)
_lowerCamelCase : Dict = parser.parse_args()
if args.num_workers is None:
_lowerCamelCase : int = multiprocessing.cpu_count()
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
_lowerCamelCase : Any = time.time()
_lowerCamelCase : Optional[Any] = load_dataset(args.dataset_name, split='''train''')
print(F'Time to load dataset: {time.time()-t_start:.2f}')
# Run preprocessing
_lowerCamelCase : Optional[int] = time.time()
_lowerCamelCase : Union[str, Any] = ds.map(preprocess, num_proc=args.num_workers)
print(F'Time to preprocess dataset: {time.time()-t_start:.2f}')
# Deduplicate hashes
_lowerCamelCase : List[Any] = set(ds.unique('''hash'''))
_lowerCamelCase : Dict = len(uniques) / len(ds)
print(F'Fraction of duplicates: {1-frac:.2%}')
# Deduplicate data and apply heuristics
_lowerCamelCase : List[Any] = time.time()
_lowerCamelCase : Optional[int] = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F'Time to filter dataset: {time.time()-t_start:.2f}')
print(F'Size of filtered dataset: {len(ds_filter)}')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
_lowerCamelCase : Union[str, Any] = time.time()
_lowerCamelCase , _lowerCamelCase : Dict = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'Time to deduplicate dataset: {time.time()-t_start:.2f}')
    print(F'Size of deduplicated dataset: {len(ds_filter)}')
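# Minimal Jaccard-similarity sketch (illustrative addition): the near-dedup
# step above clusters documents whose token-set overlap exceeds
# `args.jaccard_threshold`; this shows the underlying set computation.
def _jaccard(tokens_a, tokens_b):
    a, b = set(tokens_a), set(tokens_b)
    return len(a & b) / len(a | b)
# _jaccard('ab bc cd'.split(), 'ab bc de'.split()) == 2 / 4 == 0.5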
# Save data in batches of samples_per_file
_lowerCamelCase : Optional[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
_lowerCamelCase : int = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
_lowerCamelCase : Union[str, Any] = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
_lowerCamelCase : Dict = str(data_dir / F'file-{file_number+1:012}.json')
_lowerCamelCase : str = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'Time to save dataset: {time.time()-t_start:.2f}') | 686 | 0 |
from __future__ import annotations
import numpy as np
def _A ( _lowercase ) -> int:
"""simple docstring"""
return np.maximum(0 , _lowercase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : int = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = "yolos"
def __init__( self : int , lowercase : List[str]=768 , lowercase : Tuple=12 , lowercase : int=12 , lowercase : int=3_072 , lowercase : Optional[int]="gelu" , lowercase : str=0.0 , lowercase : Optional[int]=0.0 , lowercase : Optional[Any]=0.02 , lowercase : List[str]=1E-12 , lowercase : Dict=[512, 864] , lowercase : Union[str, Any]=16 , lowercase : List[Any]=3 , lowercase : List[str]=True , lowercase : Optional[int]=100 , lowercase : int=True , lowercase : Dict=False , lowercase : str=1 , lowercase : int=5 , lowercase : Tuple=2 , lowercase : List[str]=5 , lowercase : Any=2 , lowercase : List[str]=0.1 , **lowercase : int , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = qkv_bias
_snake_case = num_detection_tokens
_snake_case = use_mid_position_embeddings
_snake_case = auxiliary_loss
# Hungarian matcher
_snake_case = class_cost
_snake_case = bbox_cost
_snake_case = giou_cost
# Loss coefficients
_snake_case = bbox_loss_coefficient
_snake_case = giou_loss_coefficient
_snake_case = eos_coefficient
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = version.parse("1.11" )
@property
def A ( self : str ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def A ( self : Any ):
'''simple docstring'''
return 1E-4
@property
def A ( self : List[Any] ):
'''simple docstring'''
return 12 | 686 | 0 |
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
UpperCAmelCase_ = {
"""n_samples""": 6_4,
"""horizon""": 3_2,
"""num_inference_steps""": 2_0,
"""n_guide_steps""": 2, # can set to 0 for faster sampling, does not use value network
"""scale_grad_by_std""": True,
"""scale""": 0.1,
"""eta""": 0.0,
"""t_grad_cutoff""": 2,
"""device""": """cpu""",
}
if __name__ == "__main__":
UpperCAmelCase_ = """hopper-medium-v2"""
UpperCAmelCase_ = gym.make(env_name)
UpperCAmelCase_ = ValueGuidedRLPipeline.from_pretrained(
"""bglick13/hopper-medium-v2-value-function-hor32""",
env=env,
)
env.seed(0)
UpperCAmelCase_ = env.reset()
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
UpperCAmelCase_ = 1_0_0_0
UpperCAmelCase_ = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
UpperCAmelCase_ = pipeline(obs, planning_horizon=3_2)
# execute action in environment
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = env.step(denorm_actions)
UpperCAmelCase_ = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
f' {total_score}'
)
# save observations for rendering
rollout.append(next_observation.copy())
UpperCAmelCase_ = next_observation
except KeyboardInterrupt:
pass
print(f'Total reward: {total_reward}')
| 2 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_lowerCamelCase : Tuple = logging.get_logger(__name__)
# General docstring
_lowerCamelCase : Union[str, Any] = '''ResNetConfig'''
# Base docstring
_lowerCamelCase : int = '''microsoft/resnet-50'''
_lowerCamelCase : Optional[Any] = [1, 2_048, 7, 7]
# Image classification docstring
_lowerCamelCase : int = '''microsoft/resnet-50'''
_lowerCamelCase : Optional[int] = '''tiger cat'''
_lowerCamelCase : str = [
'''microsoft/resnet-50''',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int , lowercase : int , lowercase : int = 3 , lowercase : int = 1 , lowercase : str = "relu" ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Convad(
lowercase , lowercase , kernel_size=lowercase , stride=lowercase , padding=kernel_size // 2 , bias=lowercase )
_snake_case = nn.BatchNormad(lowercase )
_snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def A ( self : Union[str, Any] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = self.convolution(lowercase )
_snake_case = self.normalization(lowercase )
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : ResNetConfig ):
'''simple docstring'''
super().__init__()
_snake_case = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_snake_case = config.num_channels
def A ( self : Tuple , lowercase : Tensor ):
'''simple docstring'''
_snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
_snake_case = self.embedder(lowercase )
_snake_case = self.pooler(lowercase )
return embedding
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase : int , lowercase : int , lowercase : int = 2 ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Convad(lowercase , lowercase , kernel_size=1 , stride=lowercase , bias=lowercase )
_snake_case = nn.BatchNormad(lowercase )
def A ( self : List[str] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = self.convolution(lowercase )
_snake_case = self.normalization(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : int , lowercase : int , lowercase : int = 1 , lowercase : str = "relu" ):
'''simple docstring'''
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = (
ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , activation=lowercase ) , )
_snake_case = ACTaFN[activation]
def A ( self : List[str] , lowercase : List[str] ):
'''simple docstring'''
_snake_case = hidden_state
_snake_case = self.layer(lowercase )
_snake_case = self.shortcut(lowercase )
hidden_state += residual
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int , lowercase : int , lowercase : int = 1 , lowercase : str = "relu" , lowercase : int = 4 ):
'''simple docstring'''
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = out_channels // reduction
_snake_case = (
ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowercase , lowercase , kernel_size=1 ) , ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , kernel_size=1 , activation=lowercase ) , )
_snake_case = ACTaFN[activation]
def A ( self : Dict , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = hidden_state
_snake_case = self.layer(lowercase )
_snake_case = self.shortcut(lowercase )
hidden_state += residual
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowercase : ResNetConfig , lowercase : int , lowercase : int , lowercase : int = 2 , lowercase : int = 2 , ):
'''simple docstring'''
super().__init__()
_snake_case = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
_snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , stride=lowercase , activation=config.hidden_act ) , *[layer(lowercase , lowercase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def A ( self : List[str] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = input
for layer in self.layers:
_snake_case = layer(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : ResNetConfig ):
'''simple docstring'''
super().__init__()
_snake_case = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase , config.depths[1:] ):
self.stages.append(ResNetStage(lowercase , lowercase , lowercase , depth=lowercase ) )
def A ( self : str , lowercase : Tensor , lowercase : bool = False , lowercase : bool = True ):
'''simple docstring'''
_snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
_snake_case = stage_module(lowercase )
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowercase , hidden_states=lowercase , )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = ResNetConfig
_UpperCAmelCase : Tuple = "resnet"
_UpperCAmelCase : Optional[Any] = "pixel_values"
_UpperCAmelCase : Dict = True
def A ( self : List[str] , lowercase : Dict ):
'''simple docstring'''
if isinstance(lowercase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowercase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def A ( self : Tuple , lowercase : List[Any] , lowercase : Optional[Any]=False ):
'''simple docstring'''
if isinstance(lowercase , lowercase ):
_snake_case = value
_lowerCamelCase : str = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
_lowerCamelCase : int = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare ResNet model outputting raw features without any specific head on top." ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : Any ):
'''simple docstring'''
super().__init__(lowercase )
_snake_case = config
_snake_case = ResNetEmbeddings(lowercase )
_snake_case = ResNetEncoder(lowercase )
_snake_case = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A ( self : Union[str, Any] , lowercase : Tensor , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None ):
'''simple docstring'''
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.embedder(lowercase )
_snake_case = self.encoder(
lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = encoder_outputs[0]
_snake_case = self.pooler(lowercase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=encoder_outputs.hidden_states , )
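# Usage sketch (illustrative addition, not part of the original file). It
# assumes the upstream `transformers` classes rather than the anonymized
# definitions above, and only runs when the module is executed directly.
if __name__ == "__main__":
    from transformers import ResNetConfig, ResNetModel  # real upstream API
    _cfg = ResNetConfig()
    _net = ResNetModel(_cfg).eval()
    with torch.no_grad():
        _out = _net(torch.randn(1, _cfg.num_channels, 224, 224))
    print(_out.pooler_output.shape)  # torch.Size([1, 2048, 1, 1])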
@add_start_docstrings(
"\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase : int ):
'''simple docstring'''
super().__init__(lowercase )
_snake_case = config.num_labels
_snake_case = ResNetModel(lowercase )
# classification head
_snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A ( self : Union[str, Any] , lowercase : Optional[torch.FloatTensor] = None , lowercase : Optional[torch.LongTensor] = None , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None , ):
'''simple docstring'''
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.resnet(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = outputs.pooler_output if return_dict else outputs[1]
_snake_case = self.classifier(lowercase )
_snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_snake_case = 'single_label_classification'
else:
_snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
_snake_case = MSELoss()
if self.num_labels == 1:
_snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_snake_case = loss_fct(lowercase , lowercase )
elif self.config.problem_type == "single_label_classification":
_snake_case = CrossEntropyLoss()
_snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_snake_case = BCEWithLogitsLoss()
_snake_case = loss_fct(lowercase , lowercase )
if not return_dict:
_snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n " ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Tuple , lowercase : Union[str, Any] ):
'''simple docstring'''
super().__init__(lowercase )
super()._init_backbone(lowercase )
_snake_case = [config.embedding_size] + config.hidden_sizes
_snake_case = ResNetEmbeddings(lowercase )
_snake_case = ResNetEncoder(lowercase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@replace_return_docstrings(output_type=lowercase , config_class=_CONFIG_FOR_DOC )
def A ( self : Dict , lowercase : Tensor , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None ):
'''simple docstring'''
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = self.embedder(lowercase )
_snake_case = self.encoder(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = outputs.hidden_states
_snake_case = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_snake_case = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowercase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowercase , ) | 686 | 0 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20])
def A_( A : str , A : List[Any] , A : Tuple):
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , A)
UpperCamelCase = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
UpperCamelCase = dataset_size < in_memory_max_size
else:
UpperCamelCase = False
UpperCamelCase = is_small_dataset(A)
assert result == expected
| 3 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Tuple = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
__UpperCamelCase : Union[str, Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : tuple , _UpperCAmelCase : Path , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple=False , ):
output_path.parent.mkdir(parents=_UpperCAmelCase , exist_ok=_UpperCAmelCase )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
_UpperCAmelCase , _UpperCAmelCase , f=output_path.as_posix() , input_names=_UpperCAmelCase , output_names=_UpperCAmelCase , dynamic_axes=_UpperCAmelCase , do_constant_folding=_UpperCAmelCase , use_external_data_format=_UpperCAmelCase , enable_onnx_checker=_UpperCAmelCase , opset_version=_UpperCAmelCase , )
else:
export(
_UpperCAmelCase , _UpperCAmelCase , f=output_path.as_posix() , input_names=_UpperCAmelCase , output_names=_UpperCAmelCase , dynamic_axes=_UpperCAmelCase , do_constant_folding=_UpperCAmelCase , opset_version=_UpperCAmelCase , )
@torch.no_grad()
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : bool = False ):
lowerCAmelCase = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
lowerCAmelCase = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
lowerCAmelCase = 'cpu'
lowerCAmelCase = Path(_UpperCAmelCase )
# VAE DECODER
lowerCAmelCase = AutoencoderKL.from_pretrained(model_path + '/vae' )
lowerCAmelCase = vae_decoder.config.latent_channels
# forward only through the decoder part
lowerCAmelCase = vae_decoder.decode
onnx_export(
_UpperCAmelCase , model_args=(
torch.randn(1 , _UpperCAmelCase , 25 , 25 ).to(device=_UpperCAmelCase , dtype=_UpperCAmelCase ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=_UpperCAmelCase , )
del vae_decoder
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
__UpperCamelCase : int = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print('''SD: Done: ONNX''')
| 4 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
def a_ ( __lowercase : Union[str, Any] ) -> List[Any]:
_snake_case = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
_snake_case = DetaConfig(
backbone_config=__lowercase , num_queries=900 , encoder_ffn_dim=2_048 , decoder_ffn_dim=2_048 , num_feature_levels=5 , assign_first_stage=__lowercase , with_box_refine=__lowercase , two_stage=__lowercase , )
# set labels
_snake_case = 'huggingface/label-files'
if "o365" in model_name:
_snake_case = 366
_snake_case = 'object365-id2label.json'
else:
_snake_case = 91
_snake_case = 'coco-detection-id2label.json'
_snake_case = num_labels
_snake_case = json.load(open(cached_download(hf_hub_url(__lowercase , __lowercase , repo_type='dataset' ) ) , 'r' ) )
_snake_case = {int(__lowercase ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
return config
def a_ ( __lowercase : int ) -> str:
_snake_case = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def a_ ( __lowercase : str , __lowercase : Tuple , __lowercase : str ) -> Union[str, Any]:
_snake_case = dct.pop(__lowercase )
_snake_case = val
def a_ ( __lowercase : List[str] , __lowercase : str ) -> Dict:
_snake_case = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_snake_case = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_snake_case = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
_snake_case = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_snake_case = in_proj_weight[:dim, :]
_snake_case = in_proj_bias[: dim]
_snake_case = in_proj_weight[
dim : dim * 2, :
]
_snake_case = in_proj_bias[
dim : dim * 2
]
_snake_case = in_proj_weight[
-dim :, :
]
_snake_case = in_proj_bias[-dim :]
# fmt: on
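# Slicing sketch (illustrative addition): a fused qkv projection of shape
# (3 * dim, dim) is cut row-wise into query / key / value blocks of shape
# (dim, dim) each, mirroring the index arithmetic above. Defined but unused.
def _split_qkv_demo(dim=4):
    qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)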
def a_ ( __lowercase : Dict , __lowercase : Dict ) -> str:
# transformer decoder self-attention layers
_snake_case = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
_snake_case = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
_snake_case = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_snake_case = in_proj_weight[:hidden_size, :]
_snake_case = in_proj_bias[:hidden_size]
_snake_case = in_proj_weight[
hidden_size : hidden_size * 2, :
]
_snake_case = in_proj_bias[hidden_size : hidden_size * 2]
_snake_case = in_proj_weight[-hidden_size:, :]
_snake_case = in_proj_bias[-hidden_size:]
def a_ ( ) -> List[str]:
_snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_snake_case = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def a_ ( __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Tuple ) -> Optional[Any]:
_snake_case = get_deta_config(__lowercase )
# load original state dict
if model_name == "deta-swin-large":
_snake_case = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
_snake_case = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
_snake_case = torch.load(__lowercase , map_location='cpu' )['model']
# original state dict
for name, param in state_dict.items():
print(__lowercase , param.shape )
# rename keys
_snake_case = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
read_in_swin_q_k_v(__lowercase , config.backbone_config )
read_in_decoder_q_k_v(__lowercase , __lowercase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
if "input_proj" in key:
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
# finally, create HuggingFace model and load state dict
_snake_case = DetaForObjectDetection(__lowercase )
model.load_state_dict(__lowercase )
model.eval()
_snake_case = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(__lowercase )
# load image processor
_snake_case = DetaImageProcessor(format='coco_detection' )
# verify our conversion on image
_snake_case = prepare_img()
_snake_case = processor(images=__lowercase , return_tensors='pt' )
_snake_case = encoding['pixel_values']
_snake_case = model(pixel_values.to(__lowercase ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
_snake_case = torch.tensor(
[[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
_snake_case = torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
elif model_name == "deta-swin-large-o365":
_snake_case = torch.tensor(
[[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
_snake_case = torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__lowercase ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__lowercase ) , atol=1E-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
processor.save_pretrained(__lowercase )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(f'''jozhang97/{model_name}''' )
processor.push_to_hub(f'''jozhang97/{model_name}''' )
if __name__ == "__main__":
_lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_lowerCamelCase : List[Any] = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 686 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""asapp/sew-tiny-100k""": """https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json""",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Optional[int] = '''sew'''
def __init__( self , _lowercase=32 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3_072 , _lowercase=2 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.0 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.02 , _lowercase=1e-5 , _lowercase="group" , _lowercase="gelu" , _lowercase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , _lowercase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _lowercase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _lowercase=False , _lowercase=128 , _lowercase=16 , _lowercase=True , _lowercase=0.05 , _lowercase=10 , _lowercase=2 , _lowercase=0.0 , _lowercase=10 , _lowercase=0 , _lowercase="mean" , _lowercase=False , _lowercase=False , _lowercase=256 , _lowercase=0 , _lowercase=1 , _lowercase=2 , **_lowercase , ):
"""simple docstring"""
super().__init__(**_lowercase , pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase )
_lowerCAmelCase = hidden_size
_lowerCAmelCase = feat_extract_norm
_lowerCAmelCase = feat_extract_activation
_lowerCAmelCase = list(_lowercase )
_lowerCAmelCase = list(_lowercase )
_lowerCAmelCase = list(_lowercase )
_lowerCAmelCase = conv_bias
_lowerCAmelCase = num_conv_pos_embeddings
_lowerCAmelCase = num_conv_pos_embedding_groups
_lowerCAmelCase = len(self.conv_dim )
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = squeeze_factor
_lowerCAmelCase = hidden_act
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_dropout
_lowerCAmelCase = attention_dropout
_lowerCAmelCase = activation_dropout
_lowerCAmelCase = feat_proj_dropout
_lowerCAmelCase = final_dropout
_lowerCAmelCase = layerdrop
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = initializer_range
_lowerCAmelCase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. """
                """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, """
                F'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride) '
                F'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase = apply_spec_augment
_lowerCAmelCase = mask_time_prob
_lowerCAmelCase = mask_time_length
_lowerCAmelCase = mask_time_min_masks
_lowerCAmelCase = mask_feature_prob
_lowerCAmelCase = mask_feature_length
_lowerCAmelCase = mask_feature_min_masks
# ctc loss
_lowerCAmelCase = ctc_loss_reduction
_lowerCAmelCase = ctc_zero_infinity
# sequence classification
_lowerCAmelCase = use_weighted_layer_sum
_lowerCAmelCase = classifier_proj_size
@property
def _lowercase ( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
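# Illustrative addition (not in the original file): with the default
# conv_stride (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), the feature extractor
# downsamples by 5 * 2**6 = 320 input samples per output frame.
def _default_downsampling_factor():
    strides = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
    return functools.reduce(operator.mul, strides, 1)  # == 320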
| 5 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_lowerCamelCase : Dict = '''pt'''
elif is_tf_available():
_lowerCamelCase : List[str] = '''tf'''
else:
_lowerCamelCase : List[Any] = '''jax'''
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = PerceiverTokenizer
_UpperCAmelCase : Optional[int] = False
def A ( self : Tuple ):
'''simple docstring'''
super().setUp()
_snake_case = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A ( self : str ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def A ( self : Optional[int] , **lowercase : Dict ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
def A ( self : Optional[int] , lowercase : Tuple , lowercase : Optional[Any]=False , lowercase : int=20 , lowercase : Optional[int]=5 ):
'''simple docstring'''
_snake_case = []
for i in range(len(lowercase ) ):
try:
_snake_case = tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_snake_case = list(filter(lambda lowercase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , lowercase ) )
_snake_case = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowercase ) , lowercase ) )
if max_length is not None and len(lowercase ) > max_length:
_snake_case = toks[:max_length]
if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0:
while len(lowercase ) < min_length:
_snake_case = toks + toks
# toks_str = [t[1] for t in toks]
_snake_case = [t[0] for t in toks]
# Ensure consistency
_snake_case = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
if " " not in output_txt and len(lowercase ) > 1:
_snake_case = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase )
)
if with_prefix_space:
_snake_case = ' ' + output_txt
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
return output_txt, output_ids
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = 'Unicode €.'
_snake_case = tokenizer(lowercase )
_snake_case = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , lowercase )
# decoding
_snake_case = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '[CLS]Unicode €.[SEP]' )
_snake_case = tokenizer('e è é ê ë' )
_snake_case = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , lowercase )
# decoding
_snake_case = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_snake_case = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
_snake_case = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
self.assertIsInstance(lowercase , lowercase )
if FRAMEWORK != "jax":
_snake_case = list(batch.input_ids.numpy()[0] )
else:
_snake_case = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowercase , lowercase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_snake_case = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , lowercase )
self.assertIn('attention_mask' , lowercase )
self.assertNotIn('decoder_input_ids' , lowercase )
self.assertNotIn('decoder_attention_mask' , lowercase )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = [
'Summary of the text.',
'Another summary.',
]
_snake_case = tokenizer(
text_target=lowercase , max_length=32 , padding='max_length' , truncation=lowercase , return_tensors=lowercase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_snake_case = tokenizer.__class__.from_pretrained(lowercase )
_snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
shutil.rmtree(lowercase )
_snake_case = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_snake_case = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_snake_case = tokenizer.__class__.from_pretrained(lowercase )
_snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_snake_case = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowercase )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase )
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowercase )
_snake_case = [f'''<extra_id_{i}>''' for i in range(125 )]
_snake_case = added_tokens_extra_ids + [
'an_additional_special_token'
]
_snake_case = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case = tokenizer_class.from_pretrained(
lowercase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=lowercase )]
_snake_case = tokenizer_class.from_pretrained(
lowercase , additional_special_tokens=lowercase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def A ( self : Dict ):
'''simple docstring'''
pass
def A ( self : Optional[int] ):
'''simple docstring'''
pass
def A ( self : List[str] ):
'''simple docstring'''
pass
def A ( self : Dict ):
'''simple docstring'''
pass
def A ( self : int ):
'''simple docstring'''
_snake_case = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_snake_case = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
_snake_case = tokenizer.convert_tokens_to_string(lowercase )
self.assertIsInstance(lowercase , lowercase ) | 686 | 0 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """encoder.embed_positions._float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    mam_aaa = torch.load(checkpoint_path, map_location="""cpu""")
    args = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
    state_dict = mam_aaa["""model"""]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["""encoder.embed_tokens.weight"""].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size , max_position_embeddings=1_024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
    state_dict["""shared.weight"""] = state_dict["""decoder.embed_tokens.weight"""]
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path) | 686 | 0 |
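For orientation, a minimal driver sketch for the converter above; both paths are placeholders rather than values from the original script:

# Hypothetical programmatic use of the conversion helper defined above; paths are placeholders.
model = convert_fairseq_mamaaa_checkpoint_from_disk("/path/to/fairseq/model.pt")
model.save_pretrained("/path/to/output/dir")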
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def a_ ( ) -> Optional[int]:
_snake_case , _snake_case = 9, 14 # noqa: F841
_snake_case = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjacency = defaultdict(list )
    for nodea, nodeb, cost in edges:
        adjacency[nodea].append([nodeb, cost] )
        adjacency[nodeb].append([nodea, cost] )
    result = mst(adjacency )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
assert edge in result or reverse in result | 686 | 0 |
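For context, a minimal sketch of a Prim's-style MST over the {node: [[neighbor, cost], ...]} mapping built above; this is an illustrative stand-in, not the imported prisms_algorithm whose output the test actually checks:

import heapq

def prim_mst_sketch(adjacency, start=0):
    # Grow the tree from `start`, always taking the cheapest edge that
    # reaches a new node; returns edges as [u, v, cost] lists.
    visited = {start}
    heap = [(cost, start, neighbor) for neighbor, cost in adjacency[start]]
    heapq.heapify(heap)
    mst_edges = []
    while heap:
        cost, u, v = heapq.heappop(heap)
        if v in visited:
            continue
        visited.add(v)
        mst_edges.append([u, v, cost])
        for neighbor, cost in adjacency[v]:
            if neighbor not in visited:
                heapq.heappush(heap, (cost, v, neighbor))
    return mst_edges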
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Tuple ):
_A = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def lowerCAmelCase_ ( self : Tuple ):
with self.assertRaises(_UpperCAmelCase ):
_A = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def lowerCAmelCase_ ( self : str ):
with self.assertRaises(_UpperCAmelCase ):
_A = pa.array(TypedSequence([1, 2, 3] , try_type=Value('bool' ) , type=Value('int64' ) ) )
def lowerCAmelCase_ ( self : str ):
_A = pa.array(TypedSequence([1, 2, 3] , type=Value('int32' ) ) )
self.assertEqual(arr.type , pa.intaa() )
def lowerCAmelCase_ ( self : Union[str, Any] ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_A = pa.array(TypedSequence(['foo', 'bar'] , type=Value('int64' ) ) )
def lowerCAmelCase_ ( self : int ):
_A = pa.array(TypedSequence([1, 2, 3] , try_type=Value('int32' ) ) )
self.assertEqual(arr.type , pa.intaa() )
def lowerCAmelCase_ ( self : List[str] ):
_A = pa.array(TypedSequence(['foo', 'bar'] , try_type=Value('int64' ) ) )
self.assertEqual(arr.type , pa.string() )
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , 'int64' ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , 'int64' ) )
def lowerCAmelCase_ ( self : Optional[int] ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_A = pa.array(TypedSequence(['foo', 'bar'] , type=ArrayaD((1, 3) , 'int64' ) ) )
def lowerCAmelCase_ ( self : List[Any] ):
_A = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , 'int64' ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , 'int64' ) )
def lowerCAmelCase_ ( self : Dict ):
_A = pa.array(TypedSequence(['foo', 'bar'] , try_type=ArrayaD((1, 3) , 'int64' ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def lowerCAmelCase_ ( self : Union[str, Any] ):
import PIL.Image
_A = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
'datasets.arrow_writer.cast_to_python_objects' , side_effect=_UpperCAmelCase ) as mock_cast_to_python_objects:
_A = pa.array(TypedSequence([{'path': None, 'bytes': b'image_bytes'}, pil_image] , type=Image() ) )
            args , kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn('optimize_list_casting' , kwargs )
self.assertFalse(kwargs['optimize_list_casting'] )
def _check_output(output , expected_num_chunks: int ) -> None:
    '''simple docstring'''
    stream = pa.BufferReader(output ) if isinstance(output , pa.Buffer ) else pa.memory_map(output )
    f = pa.ipc.open_stream(stream )
    pa_table = f.read_all()
    assert len(pa_table.to_batches() ) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def _snake_case ( fields , writer_batch_size ) -> Optional[int]:
    '''simple docstring'''
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size ) as writer:
        writer.write({'col_1': 'foo', 'col_2': 1} )
        writer.write({'col_1': 'bar', 'col_2': 2} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {'col_1': pa.string(), 'col_2': pa.intaa()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _snake_case ( ) -> Dict:
    '''simple docstring'''
    output = pa.BufferOutputStream()
    features = Features({'labels': ClassLabel(names=['neg', 'pos'] )} )
    with ArrowWriter(stream=output , features=features ) as writer:
        writer.write({'labels': 0} )
        writer.write({'labels': 1} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue() )
    f = pa.ipc.open_stream(stream )
    pa_table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
def _snake_case ( _snake_case : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_A = pa.BufferOutputStream()
with ArrowWriter(
stream=_snake_case , writer_batch_size=_snake_case , hash_salt='split_name' , check_duplicates=_snake_case , ) as writer:
with pytest.raises(_snake_case ):
writer.write({'col_1': 'foo', 'col_2': 1} , key=[1, 2] )
_A , _A = writer.finalize()
@pytest.mark.parametrize('writer_batch_size' , [None, 2, 10] )
def _snake_case ( _snake_case : str ) -> Dict:
'''simple docstring'''
_A = pa.BufferOutputStream()
with ArrowWriter(
stream=_snake_case , writer_batch_size=_snake_case , hash_salt='split_name' , check_duplicates=_snake_case , ) as writer:
with pytest.raises(_snake_case ):
writer.write({'col_1': 'foo', 'col_2': 1} , key=10 )
writer.write({'col_1': 'bar', 'col_2': 2} , key=10 )
_A , _A = writer.finalize()
@pytest.mark.parametrize('writer_batch_size' , [None, 2, 10] )
def _snake_case ( _snake_case : Dict ) -> Union[str, Any]:
'''simple docstring'''
    output = pa.BufferOutputStream()
with ArrowWriter(
stream=_snake_case , writer_batch_size=_snake_case , hash_salt='split_name' , check_duplicates=_snake_case , ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} , key=1 )
writer.write({'col_1': 'bar', 'col_2': 2} , key=2 )
        num_examples , num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def _snake_case ( _snake_case : int , _snake_case : Tuple ) -> List[Any]:
'''simple docstring'''
    output = pa.BufferOutputStream()
_A = pa.schema(_snake_case ) if fields else None
with ArrowWriter(stream=_snake_case , schema=_snake_case , writer_batch_size=_snake_case ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
writer.write_batch({'col_1': [], 'col_2': []} )
        num_examples , num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_A = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(_snake_case , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def _snake_case ( _snake_case : Any , _snake_case : Union[str, Any] ) -> Dict:
'''simple docstring'''
    output = pa.BufferOutputStream()
_A = pa.schema(_snake_case ) if fields else None
with ArrowWriter(stream=_snake_case , schema=_snake_case , writer_batch_size=_snake_case ) as writer:
writer.write_table(pa.Table.from_pydict({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} ) )
        num_examples , num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_A = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(_snake_case , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def _snake_case ( _snake_case : Any , _snake_case : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
    output = pa.BufferOutputStream()
_A = pa.schema(_snake_case ) if fields else None
with ArrowWriter(stream=_snake_case , schema=_snake_case , writer_batch_size=_snake_case ) as writer:
writer.write_row(pa.Table.from_pydict({'col_1': ['foo'], 'col_2': [1]} ) )
writer.write_row(pa.Table.from_pydict({'col_1': ['bar'], 'col_2': [2]} ) )
        num_examples , num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_A = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(_snake_case , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _snake_case ( ) -> Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
_A = {'col_1': pa.string(), 'col_2': pa.intaa()}
_A = os.path.join(_snake_case , 'test.arrow' )
with ArrowWriter(path=_snake_case , schema=pa.schema(_snake_case ) ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
            num_examples , num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(_snake_case , metadata=writer._schema.metadata )
_check_output(_snake_case , 1 )
def get_base_dtype(arr_type ) -> Tuple:
    '''simple docstring'''
    if pa.types.is_list(arr_type ):
        return get_base_dtype(arr_type.value_type )
    else:
        return arr_type
def change_first_primitive_element_in_list(lst , value ) -> Any:
    '''simple docstring'''
    if isinstance(lst[0] , list ):
        change_first_primitive_element_in_list(lst[0] , value )
    else:
        lst[0] = value
@pytest.mark.parametrize('optimized_int_type, expected_dtype' , [(None, pa.intaa()), (Value('int32' ), pa.intaa())] )
@pytest.mark.parametrize('sequence' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _snake_case ( _snake_case : List[Any] , _snake_case : List[str] , _snake_case : Optional[int] ) -> Any:
'''simple docstring'''
_A = pa.array(TypedSequence(_snake_case , optimized_int_type=_snake_case ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
'col, expected_dtype' , [
('attention_mask', pa.inta()),
('special_tokens_mask', pa.inta()),
('token_type_ids', pa.inta()),
('input_ids', pa.intaa()),
('other', pa.intaa()),
] , )
@pytest.mark.parametrize('sequence' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _snake_case ( _snake_case : str , _snake_case : Any , _snake_case : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_A = pa.array(OptimizedTypedSequence(_snake_case , col=_snake_case ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_A = copy.deepcopy(_snake_case )
_A = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(_snake_case , _snake_case )
_A = pa.array(OptimizedTypedSequence(_snake_case , col=_snake_case ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize('raise_exception' , [False, True] )
def _snake_case ( _snake_case : Dict , _snake_case : Optional[int] ) -> str:
'''simple docstring'''
_A = str(tmp_path / 'dataset-train.arrow' )
try:
with ArrowWriter(path=_snake_case ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _snake_case ( _snake_case : int ) -> Any:
'''simple docstring'''
_A = 'mock://dataset-train.arrow'
with ArrowWriter(path=_snake_case , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(_snake_case ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
        num_examples , num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(_snake_case )
def _snake_case ( ) -> Dict:
'''simple docstring'''
    output = pa.BufferOutputStream()
with ParquetWriter(stream=_snake_case ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
        num_examples , num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_A = pa.BufferReader(output.getvalue() )
_A = pq.read_table(_snake_case )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('embed_local_files' , [False, True] )
def _snake_case ( _snake_case : str , _snake_case : List[str] ) -> Dict:
'''simple docstring'''
import PIL.Image
_A = str(tmp_path / 'test_image_rgb.jpg' )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(_snake_case , format='png' )
    output = pa.BufferOutputStream()
with ParquetWriter(
stream=_snake_case , features=Features({'image': Image()} ) , embed_local_files=_snake_case ) as writer:
writer.write({'image': image_path} )
writer.finalize()
_A = pa.BufferReader(output.getvalue() )
_A = pq.read_table(_snake_case )
_A = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out['image'][0]['path'] , _snake_case )
with open(_snake_case , 'rb' ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def _snake_case ( ) -> int:
'''simple docstring'''
    schema = pa.schema([pa.field('col_1' , pa.string() , nullable=False )] )
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output ) as writer:
        writer._build_writer(inferred_schema=schema )
    assert writer._schema == pa.schema([pa.field('col_1' , pa.string() )] )
| 7 |
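For orientation, the write-then-check pattern the tests above exercise can be reproduced with plain pyarrow; this sketch uses only pyarrow's IPC stream API and is not part of the datasets test suite:

# Plain-pyarrow sketch of the stream round-trip the ArrowWriter tests rely on.
import pyarrow as pa

sink = pa.BufferOutputStream()
table = pa.table({"col_1": ["foo", "bar"], "col_2": [1, 2]})
with pa.ipc.new_stream(sink, table.schema) as ipc_writer:
    ipc_writer.write_table(table)
reader = pa.ipc.open_stream(pa.BufferReader(sink.getvalue()))
assert reader.read_all().to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}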
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Tuple = ["transformers", "torch", "note_seq"]
def __init__( self : List[Any] , *lowercase : List[Any] , **lowercase : Dict ):
'''simple docstring'''
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : List[str] , **lowercase : Any ):
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : List[str] , **lowercase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] ) | 686 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = ['''image_processor''', '''tokenizer''']
lowerCAmelCase = '''LayoutLMv2ImageProcessor'''
lowerCAmelCase = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''')
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase):
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
__A : str = kwargs.pop('feature_extractor')
__A : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(_UpperCAmelCase , _UpperCAmelCase)
def __call__( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = 0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = True , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.')
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.')
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.')
# first, apply the image processor
__A : Dict = self.image_processor(images=_UpperCAmelCase , return_tensors=_UpperCAmelCase)
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : Optional[int] = [text] # add batch dimension (as the image processor always adds a batch dimension)
__A : Dict = features['words']
__A : Any = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
# add pixel values
__A : Dict = features.pop('pixel_values')
if return_overflowing_tokens is True:
__A : str = self.get_overflowing_images(_UpperCAmelCase , encoded_inputs['overflow_to_sample_mapping'])
__A : str = images
return encoded_inputs
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx])
if len(_UpperCAmelCase) != len(_UpperCAmelCase):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
F' {len(_UpperCAmelCase)} and {len(_UpperCAmelCase)}')
return images_with_overflow
def SCREAMING_SNAKE_CASE ( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase)
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , )
return self.image_processor | 8 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def a_ ( ) -> Optional[Any]:
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request('GET' , 'https://huggingface.co' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )
@pytest.mark.integration
def a_ ( ) -> Optional[int]:
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET' , 'https://huggingface.co' )
def a_ ( ) -> Dict:
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(__lowercase ):
http_head('https://huggingface.co' ) | 686 | 0 |
def decimal_to_fraction(decimal ) -> tuple[int, int]:
    try:
        decimal = float(decimal )
    except ValueError:
        raise ValueError('Please enter a valid number' )
    fractional_part = decimal - int(decimal )
    if fractional_part == 0:
        return int(decimal ), 1
    else:
        number_of_frac_digits = len(str(decimal ).split('.' )[1] )
        numerator = int(decimal * (10**number_of_frac_digits) )
        denominator = 10**number_of_frac_digits
        divisor , dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend , divisor = divisor, remainder
        numerator , denominator = numerator / divisor, denominator / divisor
        return int(numerator ), int(denominator )
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(89.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
print(f'{decimal_to_fraction("78td") = }')
| 9 |
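A quick cross-check of the reduction step using the standard library; this is a supplementary sketch, not part of the original module:

# fractions.Fraction reduces 625/100 to the same 25/4 that decimal_to_fraction("6.25") returns.
from fractions import Fraction
assert Fraction(625, 100) == Fraction(25, 4)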
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCamelCase : Optional[int] = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_lowerCamelCase : List[str] = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around Google Research's reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_lowerCamelCase : Dict = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
 references: list of references, one for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def A ( self : Union[str, Any] , lowercase : Tuple , lowercase : Optional[Any] , lowercase : int=None , lowercase : str=True , lowercase : List[str]=False ):
'''simple docstring'''
if rouge_types is None:
_snake_case = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
_snake_case = rouge_scorer.RougeScorer(rouge_types=lowercase , use_stemmer=lowercase )
if use_aggregator:
_snake_case = scoring.BootstrapAggregator()
else:
_snake_case = []
for ref, pred in zip(lowercase , lowercase ):
_snake_case = scorer.score(lowercase , lowercase )
if use_aggregator:
aggregator.add_scores(lowercase )
else:
scores.append(lowercase )
if use_aggregator:
_snake_case = aggregator.aggregate()
else:
_snake_case = {}
for key in scores[0]:
_snake_case = [score[key] for score in scores]
return result | 686 | 0 |
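For intuition, a minimal sketch of the single-pair scoring that the `_compute` loop above performs per example, calling rouge_score directly; this mirrors the existing logic rather than adding new behavior:

# Direct use of rouge_scorer for one reference/prediction pair.
from rouge_score import rouge_scorer
scorer = rouge_scorer.RougeScorer(rouge_types=["rouge1"], use_stemmer=False)
print(scorer.score("hello there", "hello there")["rouge1"].fmeasure)  # 1.0 on an exact match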
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
| 10 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Dict = "swin2sr"
_UpperCAmelCase : Optional[int] = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Optional[int] , lowercase : List[Any]=64 , lowercase : int=1 , lowercase : Union[str, Any]=3 , lowercase : Dict=180 , lowercase : List[Any]=[6, 6, 6, 6, 6, 6] , lowercase : Dict=[6, 6, 6, 6, 6, 6] , lowercase : List[Any]=8 , lowercase : List[str]=2.0 , lowercase : Tuple=True , lowercase : Union[str, Any]=0.0 , lowercase : Dict=0.0 , lowercase : Optional[int]=0.1 , lowercase : int="gelu" , lowercase : List[str]=False , lowercase : List[Any]=0.02 , lowercase : List[Any]=1E-5 , lowercase : Optional[int]=2 , lowercase : Tuple=1.0 , lowercase : List[Any]="1conv" , lowercase : List[Any]="pixelshuffle" , **lowercase : List[str] , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = embed_dim
_snake_case = depths
_snake_case = len(lowercase )
_snake_case = num_heads
_snake_case = window_size
_snake_case = mlp_ratio
_snake_case = qkv_bias
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = drop_path_rate
_snake_case = hidden_act
_snake_case = use_absolute_embeddings
_snake_case = layer_norm_eps
_snake_case = initializer_range
_snake_case = upscale
_snake_case = img_range
_snake_case = resi_connection
_snake_case = upsampler | 686 | 0 |
'''simple docstring'''
def equation(x):
    """simple docstring"""
    return 10 - x * x
def bisection(a , b):
    """simple docstring"""
    if equation(a) * equation(b) >= 0:
        raise ValueError('''Wrong space!''')
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 11 |
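A quick numerical sanity check, supplementary to the demo prints above: the root of 10 - x*x bracketed by [0, 6] is sqrt(10) ~= 3.1623, and the 0.01 stopping width bounds the error:

import math
assert abs(bisection(0, 6) - math.sqrt(10)) < 0.01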
import random
def partition(a , left_index , right_index ) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random(a , left , right ) -> None:
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left-most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right )  # recursive quicksort to the right of the pivot point
def main() -> None:
    user_input = input('Enter numbers separated by a comma:\n' ).strip()
    a = [int(item ) for item in user_input.split(',' )]
    quick_sort_random(a , 0 , len(a ) )
    print(a )
if __name__ == "__main__":
main() | 686 | 0 |
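A quick property check for the sorter above; illustrative only and not part of the original script:

# The in-place sort should leave the list equal to Python's sorted() result.
data = [3, 1, 4, 1, 5, 9, 2, 6]
quick_sort_random(data, 0, len(data))
assert data == sorted([3, 1, 4, 1, 5, 9, 2, 6])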
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _snake_case ( UpperCAmelCase_ , unittest.TestCase ):
__lowerCAmelCase : Tuple = CodeGenTokenizer
__lowerCAmelCase : Optional[int] = CodeGenTokenizerFast
__lowerCAmelCase : Any = True
__lowerCAmelCase : Tuple = {'add_prefix_space': True}
__lowerCAmelCase : str = False
def lowercase__ ( self):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
lowercase__ : int = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_))))
lowercase__ : List[str] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowercase__ : Dict = {"""unk_token""": """<unk>"""}
lowercase__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
lowercase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_) + """\n""")
with open(self.merges_file , """w""" , encoding="""utf-8""") as fp:
fp.write("""\n""".join(SCREAMING_SNAKE_CASE_))
def lowercase__ ( self , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : int = """lower newer"""
lowercase__ : Optional[Any] = """lower newer"""
return input_text, output_text
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[str] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
lowercase__ : List[Any] = """lower newer"""
lowercase__ : str = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
lowercase__ : Dict = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = tokens + [tokenizer.unk_token]
lowercase__ : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase__ : str = self.get_tokenizer()
lowercase__ : List[str] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = """lower newer"""
# Testing tokenization
lowercase__ : int = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_)
lowercase__ : int = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
# Testing conversion to ids without special tokens
lowercase__ : str = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
# Testing conversion to ids with special tokens
lowercase__ : Optional[int] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE_)
lowercase__ : int = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
# Testing the unknown token
lowercase__ : Optional[int] = tokens + [rust_tokenizer.unk_token]
lowercase__ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
pass
def lowercase__ ( self , SCREAMING_SNAKE_CASE_=15):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
# Simple input
lowercase__ : Optional[int] = """This is a simple input"""
lowercase__ : Any = ["""This is a simple input 1""", """This is a simple input 2"""]
lowercase__ : Optional[Any] = ("""This is a simple input""", """This is a pair""")
lowercase__ : int = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""")
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""")
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""" , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""")
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""")
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""" , )
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""")
# Simple input
lowercase__ : Dict = """This is a simple input"""
lowercase__ : Optional[int] = ["""This is a simple input looooooooong""", """This is a simple input"""]
lowercase__ : List[str] = ("""This is a simple input""", """This is a pair""")
lowercase__ : Any = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
lowercase__ : Union[str, Any] = tokenizer.pad_token_id
lowercase__ : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=30 , return_tensors="""np""")
lowercase__ : Any = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncate=SCREAMING_SNAKE_CASE_ , return_tensors="""np""")
lowercase__ : List[Any] = tokenizer(*SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=60 , return_tensors="""np""")
lowercase__ : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncate=SCREAMING_SNAKE_CASE_ , return_tensors="""np""")
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30)
self.assertTrue(pad_token_id in out_s["""input_ids"""])
self.assertTrue(0 in out_s["""attention_mask"""])
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33)
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0])
self.assertFalse(0 in out_sa["""attention_mask"""][0])
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1])
self.assertTrue(0 in out_sa["""attention_mask"""][1])
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60)
self.assertTrue(pad_token_id in out_p["""input_ids"""])
self.assertTrue(0 in out_p["""attention_mask"""])
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52)
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0])
self.assertFalse(0 in out_pa["""attention_mask"""][0])
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1])
self.assertTrue(0 in out_pa["""attention_mask"""][1])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = """$$$"""
lowercase__ : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE_ , add_bos_token=SCREAMING_SNAKE_CASE_)
lowercase__ : Any = """This is a simple input"""
lowercase__ : Dict = ["""This is a simple input 1""", """This is a simple input 2"""]
lowercase__ : Optional[Any] = tokenizer.bos_token_id
lowercase__ : Any = tokenizer(SCREAMING_SNAKE_CASE_)
lowercase__ : Any = tokenizer(SCREAMING_SNAKE_CASE_)
self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE_)
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))
lowercase__ : Any = tokenizer.decode(out_s.input_ids)
lowercase__ : int = tokenizer.batch_decode(out_sa.input_ids)
self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE_)
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))
@slow
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Tuple = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""")
lowercase__ : Optional[Any] = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
lowercase__ : List[Any] = """\nif len_a > len_b: result = a\nelse: result = b"""
lowercase__ : Tuple = tokenizer.encode(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = ["""^#""", re.escape("""<|endoftext|>"""), """^'''""", """^\"\"\"""", """\n\n\n"""]
lowercase__ : int = tokenizer.decode(SCREAMING_SNAKE_CASE_ , truncate_before_pattern=SCREAMING_SNAKE_CASE_)
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
pass
| 12 |
import math
def is_prime(number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1 ) -> int:
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 | 0 |
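A worked check of the loop above (hedged: this assumes the Project Euler 58 framing the code follows). With ratio=0.5 the diagonal prime counts go 3 -> 5 -> 8 -> 9 -> 10 while the diagonal totals go 5, 9, 13, 17, 21, so the prime share first drops below one half at side length 11:

# 10 primes out of 21 diagonal values is ~0.476 < 0.5, so solution stops at j = 11.
assert solution(0.5) == 11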
'''simple docstring'''
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def str_eval(s: str ) -> int:
    product = 1
    for digit in s:
        product *= int(digit )
    return product
def solution(n: str = N ) -> int:
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n ) - 13:
        if int(n[cur_index] ) >= int(substr[0] ):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product , str_eval(substr ) )
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
| 13 |
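A brute-force cross-check sketch for the sliding-window search above; the one-liner below is a hypothetical helper, not part of the original solution:

# Evaluate every 13-digit window directly and compare with the optimized scan.
brute = max(str_eval(N[i : i + 13]) for i in range(len(N) - 12))
print(solution(N) == brute)  # expected True for this input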
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = "resnet"
_UpperCAmelCase : Any = ["basic", "bottleneck"]
def __init__( self : Union[str, Any] , lowercase : Dict=3 , lowercase : Any=64 , lowercase : Any=[256, 512, 1_024, 2_048] , lowercase : Dict=[3, 4, 6, 3] , lowercase : Any="bottleneck" , lowercase : Optional[Any]="relu" , lowercase : Dict=False , lowercase : str=None , lowercase : Tuple=None , **lowercase : List[Any] , ):
'''simple docstring'''
super().__init__(**lowercase )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
_snake_case = num_channels
_snake_case = embedding_size
_snake_case = hidden_sizes
_snake_case = depths
_snake_case = layer_type
_snake_case = hidden_act
_snake_case = downsample_in_first_stage
_snake_case = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(lowercase ) + 1 )]
_snake_case , _snake_case = get_aligned_output_features_output_indices(
out_features=lowercase , out_indices=lowercase , stage_names=self.stage_names )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = version.parse("1.11" )
@property
def A ( self : int ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def A ( self : Optional[Any] ):
'''simple docstring'''
return 1E-3 | 686 | 0 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
a__ = '''src/diffusers'''
# Matches is_xxx_available()
a__ = re.compile(R'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
a__ = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
a__ = '''
{0} = None
'''
a__ = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
'''
a__ = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
def __UpperCAmelCase ( __a : Dict ) -> str:
"""simple docstring"""
_a : Optional[Any] = _re_backend.findall(__a )
if len(__a ) == 0:
return None
return "_and_".join(__a )
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
with open(os.path.join(__a ,'''__init__.py''' ) ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
_a : Optional[int] = f.readlines()
# Get to the point we do the actual imports for type checking
_a : List[Any] = 0
_a : Optional[int] = {}
# Go through the end of the file
while line_index < len(__a ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
_a : Any = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('''else:''' ):
line_index += 1
line_index += 1
_a : List[str] = []
# Until we unindent, add backend objects to the list
while line_index < len(__a ) and len(lines[line_index] ) > 1:
_a : Optional[Any] = lines[line_index]
_a : Any = _re_single_line_import.search(__a )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(__a ) > 0:
_a : Dict = objects
else:
line_index += 1
return backend_specific_objects
def __UpperCAmelCase ( __a : str ,__a : str ) -> str:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(__a )
elif name.islower():
return DUMMY_FUNCTION.format(__a ,__a )
else:
return DUMMY_CLASS.format(__a ,__a )
def __UpperCAmelCase ( __a : List[Any]=None ) -> Tuple:
"""simple docstring"""
if backend_specific_objects is None:
_a : Optional[int] = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
_a : Optional[Any] = {}
for backend, objects in backend_specific_objects.items():
_a : Optional[int] = '''[''' + ''', '''.join(F"""\"{b}\"""" for b in backend.split('''_and_''' ) ) + ''']'''
_a : Optional[Any] = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__a ,__a ) for o in objects] )
_a : Optional[Any] = dummy_file
return dummy_files
def __UpperCAmelCase ( __a : Union[str, Any]=False ) -> Optional[Any]:
"""simple docstring"""
_a : Any = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
_a : Dict = {'''torch''': '''pt'''}
# Locate actual dummy modules and read their content.
_a : Union[str, Any] = os.path.join(__a ,'''utils''' )
_a : List[Any] = {
backend: os.path.join(__a ,F"""dummy_{short_names.get(__a ,__a )}_objects.py""" )
for backend in dummy_files.keys()
}
_a : List[Any] = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(__a ):
with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
_a : Dict = f.read()
else:
_a : str = ''''''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F"""Updating diffusers.utils.dummy_{short_names.get(__a ,__a )}_objects.py as the main """
'''__init__ has new objects.''' )
with open(dummy_file_paths[backend] ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'''The main __init__ has objects that are not present in '''
F"""diffusers.utils.dummy_{short_names.get(__a ,__a )}_objects.py. Run `make fix-copies` """
'''to fix this.''' )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
a__ = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 14 |
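For reference, the intended invocation per the comment at the top of the script:

# Run from the repository root; --fix_and_overwrite rewrites the dummy files in place.
#   python utils/check_dummies.py
#   python utils/check_dummies.py --fix_and_overwrite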
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : List[Any] , lowercase : Union[str, Any] , lowercase : int ):
'''simple docstring'''
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(lowercase ) for s in shape] )}.npy'''
def A ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
def A ( self : List[Any] , lowercase : Tuple=0 , lowercase : Optional[int]=(4, 4, 64, 64) , lowercase : Optional[int]=False ):
'''simple docstring'''
_snake_case = jnp.bfloataa if fpaa else jnp.floataa
_snake_case = jnp.array(load_hf_numpy(self.get_file_format(lowercase , lowercase ) ) , dtype=lowercase )
return image
def A ( self : Tuple , lowercase : Any=False , lowercase : Union[str, Any]="CompVis/stable-diffusion-v1-4" ):
'''simple docstring'''
_snake_case = jnp.bfloataa if fpaa else jnp.floataa
_snake_case = 'bf16' if fpaa else None
_snake_case , _snake_case = FlaxUNetaDConditionModel.from_pretrained(
lowercase , subfolder='unet' , dtype=lowercase , revision=lowercase )
return model, params
def A ( self : Union[str, Any] , lowercase : str=0 , lowercase : Optional[Any]=(4, 77, 768) , lowercase : int=False ):
'''simple docstring'''
_snake_case = jnp.bfloataa if fpaa else jnp.floataa
_snake_case = jnp.array(load_hf_numpy(self.get_file_format(lowercase , lowercase ) ) , dtype=lowercase )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def A ( self : Tuple , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : List[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=lowercase )
_snake_case = self.get_latents(lowercase , fpaa=lowercase )
_snake_case = self.get_encoder_hidden_states(lowercase , fpaa=lowercase )
_snake_case = model.apply(
{'params': params} , lowercase , jnp.array(lowercase , dtype=jnp.intaa ) , encoder_hidden_states=lowercase , ).sample
assert sample.shape == latents.shape
_snake_case = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_snake_case = jnp.array(lowercase , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowercase , lowercase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def A ( self : str , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : List[str] ):
'''simple docstring'''
_snake_case , _snake_case = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=lowercase )
_snake_case = self.get_latents(lowercase , shape=(4, 4, 96, 96) , fpaa=lowercase )
_snake_case = self.get_encoder_hidden_states(lowercase , shape=(4, 77, 1_024) , fpaa=lowercase )
_snake_case = model.apply(
{'params': params} , lowercase , jnp.array(lowercase , dtype=jnp.intaa ) , encoder_hidden_states=lowercase , ).sample
assert sample.shape == latents.shape
_snake_case = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_snake_case = jnp.array(lowercase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowercase , lowercase , atol=1E-2 ) | 686 | 0 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def split_text( text : str , n : int=100 , character : str=" " ) -> List[str]:
    """simple docstring"""
    text = text.split(character )
    return [character.join(text[i : i + n] ).strip() for i in range(0 , len(text ) , n )]
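# A minimal sketch of the chunking above (hypothetical input, n=3):
#   split_text("a b c d e", n=3) -> ["a b c", "d e"]
# i.e. the text is split on `character` and regrouped into n-word passages.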
def split_documents( documents : dict ) -> dict:
    """simple docstring"""
    titles , texts = [], []
    for title, text in zip(documents["""title"""] , documents["""text"""] ):
        if text is not None:
            for passage in split_text(text ):
                titles.append(title if title is not None else """""" )
                texts.append(passage )
    return {"title": titles, "text": texts}
def embed( documents : dict , ctx_encoder : DPRContextEncoder , ctx_tokenizer : DPRContextEncoderTokenizerFast ) -> dict:
    """simple docstring"""
    input_ids = ctx_tokenizer(
        documents["""title"""] , documents["""text"""] , truncation=True , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
    embeddings = ctx_encoder(input_ids.to(device=device ) , return_dict=True ).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def UpperCamelCase ( __magic_name__ : "RagExampleArguments" , __magic_name__ : "ProcessingArguments" , __magic_name__ : "IndexHnswArguments" , ) -> int:
"""simple docstring"""
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        """csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents , batched=True , num_proc=processing_args.num_proc )
# And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=device )
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
    new_features = Features(
        {"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed , ctx_encoder=ctx_encoder , ctx_tokenizer=ctx_tokenizer ) , batched=True , batch_size=processing_args.batch_size , features=new_features , )
# And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
    dataset.save_to_disk(passages_path )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index("""embeddings""" , custom_index=index )
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
    dataset.get_index("""embeddings""" ).save(index_path )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
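# A hedged usage sketch (not part of the original script): once saved, the
# passages and index can be reloaded and queried with a DPR question embedding
# through the standard `datasets` FAISS API, e.g.:
#   from datasets import load_from_disk
#   ds = load_from_disk(passages_path)
#   ds.load_faiss_index("embeddings", index_path)
#   scores, retrieved = ds.get_nearest_examples("embeddings", question_embedding, k=5)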
@dataclass
class RagExampleArguments:
'''simple docstring'''
    csv_path: str = field(
        default=str(Path(__file__ ).parent / '''test_run''' / '''dummy-kb''' / '''my_knowledge_dataset.csv''' ) , metadata={'''help''': '''Path to a tab-separated csv file with columns \'title\' and \'text\''''} , )
    question: Optional[str] = field(
        default=None , metadata={'''help''': '''Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'''} , )
    rag_model_name: str = field(
        default='''facebook/rag-sequence-nq''' , metadata={'''help''': '''The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''''} , )
    dpr_ctx_encoder_model_name: str = field(
        default='''facebook/dpr-ctx_encoder-multiset-base''' , metadata={
            '''help''': (
                '''The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'''
                ''' \'facebook/dpr-ctx_encoder-multiset-base\''''
            )
        } , )
    output_dir: Optional[str] = field(
        default=str(Path(__file__ ).parent / '''test_run''' / '''dummy-kb''' ) , metadata={'''help''': '''Path to a directory where the dataset passages and the index will be saved'''} , )
@dataclass
class ProcessingArguments:
'''simple docstring'''
    num_proc: Optional[int] = field(
        default=None , metadata={
            '''help''': '''The number of processes to use to split the documents into passages. Default is single process.'''
        } , )
    batch_size: int = field(
        default=16 , metadata={
            '''help''': '''The batch size to use when computing the passages embeddings using the DPR context encoder.'''
        } , )
@dataclass
class IndexHnswArguments:
'''simple docstring'''
    d: int = field(
        default=768 , metadata={'''help''': '''The dimension of the embeddings to pass to the HNSW Faiss index.'''} , )
    m: int = field(
        default=128 , metadata={
            '''help''': (
                '''The number of bi-directional links created for every new element during the HNSW index construction.'''
            )
        } , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 15 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def a_ ( __lowercase : Any ) -> List[Any]:
_snake_case = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def a_ ( __lowercase : Dict ) -> Tuple:
_snake_case , _snake_case = emb.weight.shape
_snake_case = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
_snake_case = emb.weight.data
return lin_layer
def a_ ( __lowercase : Optional[int] , __lowercase : Union[str, Any]=None ) -> Tuple:
_snake_case = {}
for old_key in state_dict.keys():
_snake_case = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
_snake_case = key.replace('moe_layer.experts.0' , f'''ffn.experts.expert_{expert_idx}''' )
else:
_snake_case = key.replace('moe_layer.experts.' , 'ffn.experts.expert_' )
if "gate" in key:
_snake_case = key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier' )
if "fc2" and "experts" not in key:
_snake_case = key.replace('.fc2.' , '.ffn.fc2.' )
if "fc1" and "experts" not in key:
_snake_case = key.replace('.fc1.' , '.ffn.fc1.' )
if ".encoder_attn." in key:
_snake_case = key.replace('.encoder_attn.' , '.cross_attention.' )
if "encoder_attn_layer_norm" in key:
_snake_case = key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm' )
if "final_layer_norm" in key:
_snake_case = key.replace('final_layer_norm' , 'ff_layer_norm' )
_snake_case = state_dict[old_key]
return new_dict
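# Illustrative mappings produced by rename_fairseq_keys (hypothetical keys,
# for a call with expert_idx=3):
#   'layers.1.moe_layer.experts.0.fc1.weight'  -> 'layers.1.ffn.experts.expert_3.fc1.weight'
#   'layers.1.moe_layer.gate.wg.weight'        -> 'layers.1.ffn.router.classifier.weight'
#   'layers.1.encoder_attn_layer_norm.weight'  -> 'layers.1.cross_attention_layer_norm.weight'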
def a_ ( __lowercase : Optional[Any] , __lowercase : Tuple , __lowercase : Any , __lowercase : List[str] , __lowercase : str = WEIGHTS_NAME ) -> Union[str, Any]:
_snake_case = []
_snake_case = 0
os.makedirs(__lowercase , exist_ok=__lowercase )
for expert in range(__lowercase ):
_snake_case = switch_checkpoint_path + f'''-rank-{expert}.pt'''
if os.path.isfile(__lowercase ):
_snake_case = torch.load(__lowercase )['model']
remove_ignore_keys_(__lowercase )
_snake_case = rename_fairseq_keys(__lowercase , __lowercase )
_snake_case = os.path.join(
__lowercase , weights_name.replace('.bin' , f'''-{len(__lowercase )+1:05d}-of-???.bin''' ) )
torch.save(__lowercase , __lowercase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__lowercase )[0]].dtype )
# Add the last block
_snake_case = os.path.join(__lowercase , weights_name.replace('.bin' , f'''-{len(__lowercase )+1:05d}-of-???.bin''' ) )
_snake_case = torch.load(switch_checkpoint_path + '-shared.pt' )['model']
remove_ignore_keys_(__lowercase )
_snake_case = rename_fairseq_keys(__lowercase , __lowercase )
_snake_case = shared_weights['decoder.embed_tokens.weight']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__lowercase ) == 1:
_snake_case = os.path.join(__lowercase , __lowercase )
torch.save(__lowercase , __lowercase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__lowercase , __lowercase )
# Otherwise, let's build the index
_snake_case = {}
for idx, shard in enumerate(__lowercase ):
_snake_case = weights_name.replace('.bin' , f'''-{idx+1:05d}-of-{len(__lowercase ):05d}.bin''' )
_snake_case = os.path.join(__lowercase , weights_name.replace('.bin' , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__lowercase , os.path.join(__lowercase , __lowercase ) )
for key in shard:
_snake_case = shard_file
# Add the metadata
_snake_case = {'total_size': total_size}
_snake_case = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(__lowercase , __lowercase ) , 'w' , encoding='utf-8' ) as f:
_snake_case = json.dumps(__lowercase , indent=2 , sort_keys=__lowercase ) + '\n'
f.write(__lowercase )
return metadata, index
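# The metadata/index returned above follows the standard sharded-checkpoint
# layout used by `transformers`, roughly (hypothetical sizes and names):
#   {"metadata": {"total_size": 1024},
#    "weight_map": {"shared.weight": "pytorch_model-00002-of-00002.bin", ...}}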
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
_lowerCamelCase : List[str] = parser.parse_args()
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
_lowerCamelCase : Tuple = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
_lowerCamelCase : Dict = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path) | 686 | 0 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __a ( A__ : Tuple , A__ : Any , A__ : List[Any] , A__ : Any , A__ : Optional[int] ):
# Load configuration defined in the metadata file
with open(A__ ) as metadata_file:
SCREAMING_SNAKE_CASE = json.load(A__ )
SCREAMING_SNAKE_CASE = LukeConfig(use_entity_aware_attention=A__ , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE = torch.load(A__ , map_location="cpu" )
# Load the entity vocab file
SCREAMING_SNAKE_CASE = load_entity_vocab(A__ )
SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE = AddedToken("<ent>" , lstrip=A__ , rstrip=A__ )
SCREAMING_SNAKE_CASE = AddedToken("<ent2>" , lstrip=A__ , rstrip=A__ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(A__ )
with open(os.path.join(A__ , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(A__ , A__ )
SCREAMING_SNAKE_CASE = LukeTokenizer.from_pretrained(A__ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE = state_dict["embeddings.word_embeddings.weight"]
SCREAMING_SNAKE_CASE = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
SCREAMING_SNAKE_CASE = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
SCREAMING_SNAKE_CASE = torch.cat([word_emb, ent_emb, enta_emb] )
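    # i.e. the rows appended for the new "<ent>"/"<ent2>" tokens are warm-started
    # from the embeddings of "@" and "#" respectively, rather than randomly
    # initialized.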
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE = F"encoder.layer.{layer_index}.attention.self."
SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE = state_dict["entity_embeddings.entity_embeddings.weight"]
SCREAMING_SNAKE_CASE = entity_emb[entity_vocab["[MASK]"]]
SCREAMING_SNAKE_CASE = LukeModel(config=A__ ).eval()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = model.load_state_dict(A__ , strict=A__ )
if not (len(A__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F"Missing keys {', '.join(A__ )}. Expected only missing embeddings.position_ids" )
if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
raise ValueError(
"Unexpected keys"
F" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}" )
# Check outputs
SCREAMING_SNAKE_CASE = LukeTokenizer.from_pretrained(A__ , task="entity_classification" )
SCREAMING_SNAKE_CASE = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
" new world number one avoid a humiliating second- round exit at Wimbledon ."
)
SCREAMING_SNAKE_CASE = (39, 42)
SCREAMING_SNAKE_CASE = tokenizer(A__ , entity_spans=[span] , add_prefix_space=A__ , return_tensors="pt" )
SCREAMING_SNAKE_CASE = model(**A__ )
# Verify word hidden states
if model_size == "large":
SCREAMING_SNAKE_CASE = torch.Size((1, 42, 1024) )
SCREAMING_SNAKE_CASE = torch.tensor(
[[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] )
else: # base
SCREAMING_SNAKE_CASE = torch.Size((1, 42, 768) )
SCREAMING_SNAKE_CASE = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , A__ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
SCREAMING_SNAKE_CASE = torch.Size((1, 1, 1024) )
SCREAMING_SNAKE_CASE = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] )
else: # base
SCREAMING_SNAKE_CASE = torch.Size((1, 1, 768) )
SCREAMING_SNAKE_CASE = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
F" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , A__ , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(A__ ) )
model.save_pretrained(A__ )
def __a ( A__ : List[Any] ):
SCREAMING_SNAKE_CASE = {}
with open(A__ , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(A__ ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = line.rstrip().split("\t" )
SCREAMING_SNAKE_CASE = index
return entity_vocab
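# Judging from the parsing above, the entity vocab file holds one entity per
# line as "<entity name>\t<ignored second column>", and the vocabulary id is
# simply the zero-based line index.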
if __name__ == "__main__":
__A : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
__A : Optional[int] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
) | 16 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_lowerCamelCase : List[Any] = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_lowerCamelCase : Any = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_lowerCamelCase : Union[str, Any] = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each prediction should be an integer label (or a float for the ``stsb`` regression subset).
    references: list of reference labels, one per prediction.
        Each reference should be an integer label (or a float for ``stsb``).
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def a_ ( __lowercase : List[Any] , __lowercase : Any ) -> Union[str, Any]:
return float((preds == labels).mean() )
def a_ ( __lowercase : Optional[Any] , __lowercase : List[str] ) -> Dict:
_snake_case = simple_accuracy(__lowercase , __lowercase )
_snake_case = float(fa_score(y_true=__lowercase , y_pred=__lowercase ) )
return {
"accuracy": acc,
"f1": fa,
}
def a_ ( __lowercase : int , __lowercase : str ) -> str:
_snake_case = float(pearsonr(__lowercase , __lowercase )[0] )
_snake_case = float(spearmanr(__lowercase , __lowercase )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
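# A quick worked example of the helpers above (hypothetical arrays): with
# preds = np.array([0, 1, 1]) and labels = np.array([0, 1, 0]),
# simple_accuracy returns 2/3, and acc_and_fa additionally reports the binary
# F1 score of the same pair.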
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
def A ( self : List[Any] , lowercase : List[str] , lowercase : Optional[Any] ):
'''simple docstring'''
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(lowercase , lowercase )}
elif self.config_name == "stsb":
return pearson_and_spearman(lowercase , lowercase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(lowercase , lowercase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(lowercase , lowercase )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' ) | 686 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : str = '''▁'''
UpperCAmelCase_ : List[Any] = {'''vocab_file''': '''spiece.model'''}
UpperCAmelCase_ : Any = {
'''vocab_file''': {
'''google/reformer-crime-and-punishment''': (
'''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'''
)
}
}
UpperCAmelCase_ : int = {
'''google/reformer-crime-and-punishment''': 524_288,
}
class lowerCamelCase_ ( _lowercase ):
_lowercase : Union[str, Any] = VOCAB_FILES_NAMES
_lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
_lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Optional[int] = ['''input_ids''', '''attention_mask''']
def __init__( self : List[Any] , __A : Tuple , __A : Dict="</s>" , __A : Any="<unk>" , __A : Any=[] , __A : Optional[Dict[str, Any]] = None , **__A : Tuple , ):
__A : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__A , unk_token=__A , additional_special_tokens=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )
__A : str = vocab_file
__A : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
@property
def lowerCAmelCase_ ( self : Dict ):
return self.sp_model.get_piece_size()
def lowerCAmelCase_ ( self : Optional[Any] ):
__A : Optional[int] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ):
__A : Dict = self.__dict__.copy()
__A : Tuple = None
return state
def __setstate__( self : int , __A : List[Any] ):
__A : str = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__A : Union[str, Any] = {}
__A : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase_ ( self : str , __A : str ):
return self.sp_model.encode(__A , out_type=__A )
def lowerCAmelCase_ ( self : Union[str, Any] , __A : str ):
return self.sp_model.piece_to_id(__A )
def lowerCAmelCase_ ( self : Any , __A : List[str] ):
if index < self.sp_model.get_piece_size():
__A : Any = self.sp_model.IdToPiece(__A )
return token
def lowerCAmelCase_ ( self : Optional[Any] , __A : Tuple ):
__A : Optional[Any] = []
__A : Optional[int] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__A ) + token
__A : Union[str, Any] = []
else:
current_sub_tokens.append(__A )
out_string += self.sp_model.decode(__A )
return out_string.strip()
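    # Note: accumulated sub-tokens are decoded with SentencePiece while special
    # tokens are appended verbatim above, so e.g. ["▁Hello", "</s>"] keeps the
    # literal "</s>" instead of pushing it through the model.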
def lowerCAmelCase_ ( self : Tuple , __A : str , __A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__A : str = os.path.join(
__A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , """wb""" ) as fi:
__A : Any = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
| 17 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
_lowerCamelCase : Dict = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : int = "sequence-classification"
def __init__( self : Optional[int] , lowercase : Any ):
'''simple docstring'''
if type(lowercase ) == dict:
_snake_case = Namespace(**lowercase )
_snake_case = glue_output_modes[hparams.task]
_snake_case = glue_tasks_num_labels[hparams.task]
super().__init__(lowercase , lowercase , self.mode )
def A ( self : Optional[Any] , **lowercase : Optional[Any] ):
'''simple docstring'''
return self.model(**lowercase )
def A ( self : Optional[Any] , lowercase : str , lowercase : Tuple ):
'''simple docstring'''
_snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_snake_case = self(**lowercase )
_snake_case = outputs[0]
_snake_case = self.trainer.lr_schedulers[0]['scheduler']
_snake_case = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.hparams
_snake_case = processors[args.task]()
_snake_case = processor.get_labels()
for mode in ["train", "dev"]:
_snake_case = self._feature_file(lowercase )
if os.path.exists(lowercase ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , lowercase )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_snake_case = (
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_snake_case = convert_examples_to_features(
lowercase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , lowercase )
torch.save(lowercase , lowercase )
def A ( self : Dict , lowercase : str , lowercase : int , lowercase : bool = False ):
'''simple docstring'''
_snake_case = 'dev' if mode == 'test' else mode
_snake_case = self._feature_file(lowercase )
logger.info('Loading features from cached file %s' , lowercase )
_snake_case = torch.load(lowercase )
_snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_snake_case = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_snake_case = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(lowercase , lowercase , lowercase , lowercase ) , batch_size=lowercase , shuffle=lowercase , )
def A ( self : str , lowercase : Optional[Any] , lowercase : str ):
'''simple docstring'''
_snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_snake_case = self(**lowercase )
_snake_case , _snake_case = outputs[:2]
_snake_case = logits.detach().cpu().numpy()
_snake_case = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : int , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_snake_case = np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_snake_case = np.argmax(lowercase , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_snake_case = np.squeeze(lowercase )
_snake_case = np.concatenate([x['target'] for x in outputs] , axis=0 )
_snake_case = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , lowercase , lowercase )}
_snake_case = dict(results.items() )
_snake_case = results
return ret, preds_list, out_label_list
def A ( self : int , lowercase : list ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case = self._eval_end(lowercase )
_snake_case = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : List[str] , lowercase : Any ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case = self._eval_end(lowercase )
_snake_case = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def A ( lowercase : Tuple , lowercase : Any ):
'''simple docstring'''
BaseTransformer.add_model_specific_args(lowercase , lowercase )
parser.add_argument(
'--max_seq_length' , default=128 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=lowercase , required=lowercase , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=lowercase , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def a_ ( ) -> Union[str, Any]:
_snake_case = argparse.ArgumentParser()
add_generic_args(__lowercase , os.getcwd() )
_snake_case = GLUETransformer.add_model_specific_args(__lowercase , os.getcwd() )
_snake_case = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_snake_case = os.path.join(
'./results' , f'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
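        # e.g. running with --task mrpc would create a timestamped folder such as
        # ./results/mrpc_20230101_120000 (hypothetical timestamp).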
_snake_case = GLUETransformer(__lowercase )
_snake_case = generic_train(__lowercase , __lowercase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_snake_case = sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=__lowercase ) )
_snake_case = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__lowercase )
if __name__ == "__main__":
main() | 686 | 0 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {"vocab_file": "spiece.model"}
_SCREAMING_SNAKE_CASE = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class lowerCAmelCase_ ( __magic_name__ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<sep>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<cls>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=["<eop>", "<eod>"] , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None:
_lowerCAmelCase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
_lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowerCAmelCase , remove_space=_lowerCAmelCase , keep_accents=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
_lowerCAmelCase = 3
_lowerCAmelCase = do_lower_case
_lowerCAmelCase = remove_space
_lowerCAmelCase = keep_accents
_lowerCAmelCase = vocab_file
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCAmelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
_lowerCAmelCase = jieba
_lowerCAmelCase = str.maketrans(" \n" , "\u2582\u2583" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _snake_case ( self ) -> Optional[int]:
return len(self.sp_model )
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Tuple:
_lowerCAmelCase = self.__dict__.copy()
_lowerCAmelCase = None
return state
def __setstate__( self , _lowerCAmelCase ) -> Dict:
_lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_lowerCAmelCase = {}
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _snake_case ( self , _lowerCAmelCase ) -> str:
if self.remove_space:
_lowerCAmelCase = " ".join(inputs.strip().split() )
else:
_lowerCAmelCase = inputs
_lowerCAmelCase = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
_lowerCAmelCase = unicodedata.normalize("NFKD" , _lowerCAmelCase )
_lowerCAmelCase = "".join([c for c in outputs if not unicodedata.combining(_lowerCAmelCase )] )
if self.do_lower_case:
_lowerCAmelCase = outputs.lower()
return outputs
def _snake_case ( self , _lowerCAmelCase ) -> List[str]:
_lowerCAmelCase = self.preprocess_text(_lowerCAmelCase )
_lowerCAmelCase = self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase )
_lowerCAmelCase = []
for piece in pieces:
if len(_lowerCAmelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
_lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowerCAmelCase , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase = cur_pieces[1:]
else:
_lowerCAmelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_lowerCAmelCase )
else:
new_pieces.append(_lowerCAmelCase )
return new_pieces
def _snake_case ( self , _lowerCAmelCase ) -> str:
return self.sp_model.PieceToId(_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase ) -> Optional[Any]:
return self.sp_model.IdToPiece(_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase ) -> Optional[Any]:
_lowerCAmelCase = "".join(_lowerCAmelCase ).replace(_lowerCAmelCase , " " ).strip()
return out_string
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]:
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is not None:
return ([0] * len(_lowerCAmelCase )) + [1] + ([0] * len(_lowerCAmelCase )) + [1, 1]
return ([0] * len(_lowerCAmelCase )) + [1, 1]
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]:
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(_lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase = os.path.join(
_lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase , "wb" ) as fi:
_lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
def _snake_case ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase = super()._decode(*_lowerCAmelCase , **_lowerCAmelCase )
_lowerCAmelCase = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
return text
| 18 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = LEDConfig
_UpperCAmelCase : int = {}
_UpperCAmelCase : List[str] = "gelu"
def __init__( self : Union[str, Any] , lowercase : Optional[int] , lowercase : Dict=13 , lowercase : Dict=7 , lowercase : Tuple=True , lowercase : Dict=False , lowercase : Dict=99 , lowercase : Any=32 , lowercase : List[Any]=2 , lowercase : List[str]=4 , lowercase : List[str]=37 , lowercase : Dict=0.1 , lowercase : int=0.1 , lowercase : List[Any]=20 , lowercase : int=2 , lowercase : Optional[Any]=1 , lowercase : List[str]=0 , lowercase : Optional[int]=4 , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = eos_token_id
_snake_case = pad_token_id
_snake_case = bos_token_id
_snake_case = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_snake_case = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_snake_case = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
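        # e.g. with the defaults seq_length=7 and attention_window=4, the encoder
        # sequence is padded up to the next multiple of the window:
        # 7 + (4 - 7 % 4) % 4 = 8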
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_snake_case = prepare_led_inputs_dict(lowercase , lowercase , lowercase )
_snake_case = tf.concat(
[tf.zeros_like(lowercase )[:, :-1], tf.ones_like(lowercase )[:, -1:]] , axis=-1 , )
_snake_case = global_attention_mask
return config, inputs_dict
def A ( self : str , lowercase : str , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = TFLEDModel(config=lowercase ).get_decoder()
_snake_case = inputs_dict['input_ids']
_snake_case = input_ids[:1, :]
_snake_case = inputs_dict['attention_mask'][:1, :]
_snake_case = 1
# first forward pass
_snake_case = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
_snake_case , _snake_case = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_snake_case = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case = model(lowercase , attention_mask=lowercase )[0]
_snake_case = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case = output_from_no_past[:, -3:, random_slice_idx]
_snake_case = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
def a_ ( __lowercase : List[Any] , __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[str]=None , __lowercase : List[str]=None , __lowercase : List[str]=None , __lowercase : str=None , ) -> Union[str, Any]:
if attention_mask is None:
_snake_case = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_snake_case = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_snake_case = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_snake_case = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_UpperCAmelCase : Optional[int] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_UpperCAmelCase : Tuple = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_UpperCAmelCase : str = True
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : str = False
_UpperCAmelCase : List[Any] = False
def A ( self : Any ):
'''simple docstring'''
_snake_case = TFLEDModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase )
def A ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = tf.zeros_like(inputs_dict['attention_mask'] )
_snake_case = 2
_snake_case = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
_snake_case = True
_snake_case = self.model_tester.seq_length
_snake_case = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase : List[str] ):
_snake_case = outputs.decoder_attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase : List[str] ):
_snake_case = [t.numpy() for t in outputs.encoder_attentions]
_snake_case = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = False
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
_snake_case = len(lowercase )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
if self.is_encoder_decoder:
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_decoder_attentions_output(lowercase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase ) )
self.assertEqual(model.config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def A ( self : List[Any] ):
'''simple docstring'''
pass
def A ( self : Any ):
'''simple docstring'''
pass
def a_ ( __lowercase : str ) -> Optional[Any]:
return tf.constant(__lowercase , dtype=tf.intaa )
_lowerCamelCase : List[Any] = 1E-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
_snake_case = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , lowercase , lowercase )
_snake_case = model(**lowercase )[0]
_snake_case = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 )
def A ( self : str ):
'''simple docstring'''
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
_snake_case = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , lowercase , lowercase )
_snake_case = model(**lowercase )[0]
_snake_case = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 , rtol=1E-3 ) | 686 | 0 |
"""simple docstring"""
def naive_cut_rod_recursive( n: int, prices: list ) -> float:
    """simple docstring"""
    _enforce_args(n, prices )
    if n == 0:
        return 0
    max_revue = float('''-inf''' )
    for i in range(1, n + 1 ):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices ) )
    return max_revue
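# The recurrence implemented above is r(n) = max over 1 <= i <= n of
# prices[i - 1] + r(n - i), with r(0) = 0. Evaluated naively it revisits
# subproblems exponentially often (O(2^n) calls); the memoized top-down and
# bottom-up variants below bring this down to O(n^2).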
def top_down_cut_rod( n: int, prices: list ) -> float:
    """simple docstring"""
    _enforce_args(n, prices )
    max_rev = [float('''-inf''' ) for _ in range(n + 1 )]
    return _top_down_cut_rod_recursive(n, prices, max_rev )
def _top_down_cut_rod_recursive( n: int, prices: list, max_rev: list ) -> float:
    """simple docstring"""
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float('''-inf''' )
        for i in range(1, n + 1 ):
            max_revenue = max(
                max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev ), )
        max_rev[n] = max_revenue
    return max_rev[n]
def bottom_up_cut_rod( n: int, prices: list ) -> float:
    """simple docstring"""
    _enforce_args(n, prices )
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float('''-inf''' ) for _ in range(n + 1 )]
    max_rev[0] = 0
    for i in range(1, n + 1 ):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1 ):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j] )
        max_rev[i] = max_revenue_i
    return max_rev[n]
def _enforce_args( n: int, prices: list ) -> None:
    """simple docstring"""
    if n < 0:
        msg = F'''n must be greater than or equal to 0. Got n = {n}'''
        raise ValueError(msg )
    if n > len(prices ):
        msg = (
            '''Each integral piece of rod must have a corresponding price. '''
            F'''Got n = {n} but length of prices = {len(prices )}'''
        )
        raise ValueError(msg )
def main( ) -> None:
    """simple docstring"""
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices )
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices )
    max_rev_bottom_up = bottom_up_cut_rod(n, prices )
    max_rev_naive = naive_cut_rod_recursive(n, prices )
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 19 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
_lowerCamelCase : Union[str, Any] = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
_lowerCamelCase : Union[str, Any] = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
_lowerCamelCase : Optional[int] = '''zero2'''
_lowerCamelCase : List[Any] = '''zero3'''
_lowerCamelCase : Dict = [ZEROa, ZEROa]
def a_ ( __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Tuple ) -> Dict:
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
    _snake_case = parameterized.to_safe_name('_'.join(str(x ) for x in param.args ) )
return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
_lowerCamelCase : Dict = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : List[str] , lowercase : List[Any] , lowercase : Dict ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : Any , lowercase : str , lowercase : List[str] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : List[str] , lowercase : Optional[Any] , lowercase : Optional[int] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : Optional[int] , lowercase : Union[str, Any] , lowercase : Union[str, Any] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
def A ( self : List[str] , lowercase : Optional[Any] ):
'''simple docstring'''
pass
def A ( self : str , lowercase : str , lowercase : str , lowercase : int = 10 , lowercase : bool = True , lowercase : bool = True , lowercase : bool = True , ):
'''simple docstring'''
_snake_case = models[model]
_snake_case = self.run_trainer(
stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , )
self.do_checks(lowercase )
return output_dir
def A ( self : Any , lowercase : str , lowercase : str , lowercase : int = 10 , lowercase : int = 1 , lowercase : bool = True , lowercase : bool = True , ):
'''simple docstring'''
_snake_case = self.get_auto_remove_tmp_dir('./xxx' , after=lowercase )
_snake_case = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(lowercase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
'''.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_snake_case = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
_snake_case = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
_snake_case = self.get_launcher(lowercase )
_snake_case = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase , env=self.get_env() )
return output_dir
def A ( self : List[str] , lowercase : Any=False ):
'''simple docstring'''
_snake_case = min(2 , get_gpu_count() ) if distributed else 1
return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split() | 686 | 0 |
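# A minimal sketch of the name_func customization above: every parameter is
# baked into the generated sub-test name, not just the first one.
import unittest

from parameterized import parameterized


def name_with_all_params(func, param_num, param):
    suffix = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{suffix}"


class NameFuncDemo(unittest.TestCase):
    @parameterized.expand([("zero2", "base"), ("zero3", "robust")], name_func=name_with_all_params)
    def test_stage_model(self, stage, model):
        self.assertIn(stage, ("zero2", "zero3"))  # runs as e.g. test_stage_model_zero2_base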
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase: str = logging.get_logger(__name__)
_lowerCAmelCase: Optional[Any] = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class lowercase_ (lowercase__ ):
snake_case ='ibert'
def __init__( self , lowercase_=30522 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=1 , lowercase_=0 , lowercase_=2 , lowercase_="absolute" , lowercase_=False , lowercase_="none" , **lowercase_ , ) -> str:
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_)
a__ =vocab_size
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =hidden_act
a__ =intermediate_size
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =max_position_embeddings
a__ =type_vocab_size
a__ =initializer_range
a__ =layer_norm_eps
a__ =position_embedding_type
a__ =quant_mode
a__ =force_dequant
class lowercase_ (lowercase__ ):
@property
def __UpperCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a__ ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a__ ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
| 20 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_lowerCamelCase : int = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
_lowerCamelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 | 0 |
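# A hedged, self-contained sketch of the lazy-import pattern above: attributes
# resolve to real imports only on first access. The tiny import structure here
# is illustrative, not the actual transformers._LazyModule internals.
import importlib
import types


class LazyModuleDemo(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value


demo = LazyModuleDemo("demo", {"json": ["dumps"]})
assert demo.dumps({"a": 1}) == '{"a": 1}'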
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ):
__magic_name__ : List[str] =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__magic_name__ : Union[str, Any] =[(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
__magic_name__ : Optional[Any] =""""""
else:
__magic_name__ : List[Any] ="""vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__magic_name__ : Tuple =state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
__magic_name__ : Optional[Any] =state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
__magic_name__ : Union[str, Any] =in_proj_weight[
: config.hidden_size, :
]
__magic_name__ : Any =in_proj_bias[: config.hidden_size]
__magic_name__ : Any =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__magic_name__ : Any =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__magic_name__ : Union[str, Any] =in_proj_weight[
-config.hidden_size :, :
]
__magic_name__ : Any =in_proj_bias[-config.hidden_size :]
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : int =["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowerCamelCase , lowerCamelCase )
def lowerCAmelCase_ ( lowerCamelCase ):
# projection head is used in the self-supervised pre-training in MSN,
    # for downstream tasks it is not needed.
__magic_name__ : List[Any] =[
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase , lowerCamelCase )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : Optional[Any] =dct.pop(lowerCamelCase )
__magic_name__ : List[str] =val
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
__magic_name__ : str =ViTMSNConfig()
__magic_name__ : str =1000
__magic_name__ : Tuple ="""datasets/huggingface/label-files"""
__magic_name__ : Union[str, Any] ="""imagenet-1k-id2label.json"""
__magic_name__ : int =json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase ) , """r""" ) )
__magic_name__ : Dict ={int(lowerCamelCase ): v for k, v in idalabel.items()}
__magic_name__ : int =idalabel
__magic_name__ : List[Any] ={v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
__magic_name__ : int =384
__magic_name__ : List[str] =1536
__magic_name__ : Optional[int] =6
elif "l16" in checkpoint_url:
__magic_name__ : Tuple =1024
__magic_name__ : Dict =4096
__magic_name__ : Tuple =24
__magic_name__ : Tuple =16
__magic_name__ : List[str] =0.1
elif "b4" in checkpoint_url:
__magic_name__ : str =4
elif "l7" in checkpoint_url:
__magic_name__ : List[Any] =7
__magic_name__ : Dict =1024
__magic_name__ : Any =4096
__magic_name__ : Union[str, Any] =24
__magic_name__ : Tuple =16
__magic_name__ : Any =0.1
__magic_name__ : List[str] =ViTMSNModel(lowerCamelCase )
__magic_name__ : str =torch.hub.load_state_dict_from_url(lowerCamelCase , map_location="""cpu""" )["""target_encoder"""]
__magic_name__ : Dict =ViTImageProcessor(size=config.image_size )
remove_projection_head(lowerCamelCase )
__magic_name__ : Dict =create_rename_keys(lowerCamelCase , base_model=lowerCamelCase )
for src, dest in rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase )
read_in_q_k_v(lowerCamelCase , lowerCamelCase , base_model=lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
__magic_name__ : List[Any] ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
__magic_name__ : Dict =Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
__magic_name__ : Tuple =ViTImageProcessor(
size=config.image_size , image_mean=lowerCamelCase , image_std=lowerCamelCase )
__magic_name__ : Tuple =image_processor(images=lowerCamelCase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
__magic_name__ : Dict =model(**lowerCamelCase )
__magic_name__ : List[Any] =outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
__magic_name__ : Any =torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
__magic_name__ : Optional[int] =torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
__magic_name__ : Tuple =torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
__magic_name__ : Tuple =torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
__magic_name__ : List[Any] =torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , lowerCamelCase , atol=1E-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCamelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCAmelCase_ : int = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 21 |
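# A minimal sketch of the renaming pattern used by the conversion script above:
# pop each tensor out of the source state dict and reinsert it under the new key.
import torch


def rename_state_dict(state_dict: dict, rename_pairs: list) -> dict:
    for src, dest in rename_pairs:
        state_dict[dest] = state_dict.pop(src)
    return state_dict


sd = {"module.cls_token": torch.zeros(1, 1, 8)}
rename_state_dict(sd, [("module.cls_token", "vit.embeddings.cls_token")])
assert list(sd) == ["vit.embeddings.cls_token"]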
import random
from .binary_exp_mod import bin_exp_mod
def a_ ( __lowercase : int , __lowercase : Any=1_000 ) -> int:
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
_snake_case = n - 1
_snake_case = 0
while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp) with d odd
_snake_case = 0
while count < prec:
_snake_case = random.randint(2 , n - 1 )
_snake_case = bin_exp_mod(__lowercase , __lowercase , __lowercase )
if b != 1:
_snake_case = True
for _ in range(__lowercase ):
if b == n - 1:
_snake_case = False
break
_snake_case = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
_lowerCamelCase : Tuple = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i))) | 686 | 0 |
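# A standalone sketch of the witness loop above, assuming Python's built-in
# pow(a, d, n) in place of bin_exp_mod: write n - 1 = d * 2**exp with d odd,
# then check random bases.
import random


def is_probable_prime(n: int, rounds: int = 40) -> bool:
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    d, exp = n - 1, 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    for _ in range(rounds):
        a = random.randint(2, n - 1)
        b = pow(a, d, n)
        if b in (1, n - 1):
            continue
        for _ in range(exp - 1):
            b = b * b % n
            if b == n - 1:
                break
        else:
            return False  # a witnesses that n is composite
    return True


assert is_probable_prime(97) and not is_probable_prime(91)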
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_snake_case : Optional[int] = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
_snake_case : Dict = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
_snake_case : Optional[int] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def snake_case_ (UpperCamelCase : str ):
'''simple docstring'''
_a = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def snake_case_ (UpperCamelCase : tuple ):
'''simple docstring'''
return x[0]
def snake_case_ (UpperCamelCase : str ):
'''simple docstring'''
_a = get_letter_count(UpperCamelCase )
_a = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(UpperCamelCase )
_a = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=UpperCamelCase )
_a = ''''''.join(freq_to_letter[freq] )
_a = list(freq_to_letter_str.items() )
freq_pairs.sort(key=UpperCamelCase , reverse=UpperCamelCase )
_a = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(UpperCamelCase )
def snake_case_ (UpperCamelCase : str ):
'''simple docstring'''
_a = get_frequency_order(UpperCamelCase )
_a = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 |
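# A simplified standalone sketch of the frequency-ordering idea above: count
# letters and sort by count, breaking ties by the expected ETAOIN order (the
# tie-break direction is simplified relative to the implementation above).
import string
from collections import Counter

ETAOIN_DEMO = "ETAOINSHRDLCUMWFGYPBVKJXQZ"


def frequency_order_demo(message: str) -> str:
    counts = Counter(c for c in message.upper() if c in string.ascii_uppercase)
    return "".join(
        sorted(string.ascii_uppercase, key=lambda c: (-counts[c], ETAOIN_DEMO.find(c)))
    )


print(frequency_order_demo("The quick brown fox jumps over the lazy dog")[:6])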
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
_lowerCamelCase : int = re.compile(r'''\s+''')
def a_ ( __lowercase : List[Any] ) -> int:
return {"hash": hashlib.mda(re.sub(__lowercase , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def a_ ( __lowercase : List[Any] ) -> Dict:
_snake_case = [len(__lowercase ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(__lowercase ), "line_max": max(__lowercase )}
def a_ ( __lowercase : Optional[int] ) -> List[str]:
_snake_case = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def a_ ( __lowercase : List[Any] , __lowercase : Optional[Any] ) -> Optional[int]:
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def a_ ( __lowercase : Union[str, Any] , __lowercase : int=5 ) -> Optional[Any]:
_snake_case = ['auto-generated', 'autogenerated', 'automatically generated']
_snake_case = example['content'].splitlines()
for _, line in zip(range(__lowercase ) , __lowercase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def a_ ( __lowercase : List[Any] , __lowercase : int=5 , __lowercase : Tuple=0.0_5 ) -> Union[str, Any]:
_snake_case = ['unit tests', 'test file', 'configuration file']
_snake_case = example['content'].splitlines()
_snake_case = 0
_snake_case = 0
# first test
for _, line in zip(range(__lowercase ) , __lowercase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_snake_case = example['content'].count('\n' )
_snake_case = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def a_ ( __lowercase : Union[str, Any] ) -> Any:
_snake_case = ['def ', 'class ', 'for ', 'while ']
_snake_case = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def a_ ( __lowercase : Tuple , __lowercase : Any=4 ) -> List[str]:
_snake_case = example['content'].splitlines()
_snake_case = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def a_ ( __lowercase : Dict ) -> Dict:
_snake_case = tokenizer(example['content'] , truncation=__lowercase )['input_ids']
_snake_case = len(example['content'] ) / len(__lowercase )
return {"ratio": ratio}
def a_ ( __lowercase : Optional[Any] ) -> Any:
_snake_case = {}
results.update(get_hash(__lowercase ) )
results.update(line_stats(__lowercase ) )
results.update(alpha_stats(__lowercase ) )
results.update(char_token_ratio(__lowercase ) )
results.update(is_autogenerated(__lowercase ) )
results.update(is_config_or_test(__lowercase ) )
results.update(has_no_keywords(__lowercase ) )
results.update(has_few_assignments(__lowercase ) )
return results
def a_ ( __lowercase : Optional[int] , __lowercase : str , __lowercase : List[Any] ) -> int:
if not check_uniques(__lowercase , __lowercase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def a_ ( __lowercase : Dict ) -> Dict:
with open(__lowercase , 'rb' ) as f_in:
with gzip.open(str(__lowercase ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(__lowercase , __lowercase )
os.unlink(__lowercase )
# Settings
_lowerCamelCase : Dict = HfArgumentParser(PreprocessingArguments)
_lowerCamelCase : Dict = parser.parse_args()
if args.num_workers is None:
_lowerCamelCase : int = multiprocessing.cpu_count()
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
_lowerCamelCase : Any = time.time()
_lowerCamelCase : Optional[Any] = load_dataset(args.dataset_name, split='''train''')
print(F'Time to load dataset: {time.time()-t_start:.2f}')
# Run preprocessing
_lowerCamelCase : Optional[int] = time.time()
_lowerCamelCase : Union[str, Any] = ds.map(preprocess, num_proc=args.num_workers)
print(F'Time to preprocess dataset: {time.time()-t_start:.2f}')
# Deduplicate hashes
_lowerCamelCase : List[Any] = set(ds.unique('''hash'''))
_lowerCamelCase : Dict = len(uniques) / len(ds)
print(F'Fraction of duplicates: {1-frac:.2%}')
# Deduplicate data and apply heuristics
_lowerCamelCase : List[Any] = time.time()
_lowerCamelCase : Optional[int] = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F'Time to filter dataset: {time.time()-t_start:.2f}')
print(F'Size of filtered dataset: {len(ds_filter)}')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
_lowerCamelCase : Union[str, Any] = time.time()
_lowerCamelCase , _lowerCamelCase : Dict = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'Time to deduplicate dataset: {time.time()-t_start:.2f}')
print(F'Size of deduplicate dataset: {len(ds_filter)}')
# Save data in batches of samples_per_file
_lowerCamelCase : Optional[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
_lowerCamelCase : int = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
_lowerCamelCase : Union[str, Any] = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
_lowerCamelCase : Dict = str(data_dir / F'file-{file_number+1:012}.json')
_lowerCamelCase : str = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'Time to save dataset: {time.time()-t_start:.2f}') | 686 | 0 |
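# A minimal sketch of the exact-deduplication heuristic above: strip all
# whitespace, hash the remainder with MD5, and keep only the first occurrence.
import hashlib
import re

WHITESPACE = re.compile(r"\s+")


def content_hash(text: str) -> str:
    return hashlib.md5(WHITESPACE.sub("", text).encode("utf-8")).hexdigest()


docs = ["def f():\n    pass", "def f():  \n\tpass", "def g(): pass"]
seen, unique = set(), []
for doc in docs:
    h = content_hash(doc)
    if h not in seen:
        seen.add(h)
        unique.append(doc)
assert len(unique) == 2  # the two whitespace variants collapse to one hash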
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=12 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=0 , _UpperCAmelCase=None , ) -> Dict:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_input_mask
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = projection_dim
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = dropout
UpperCamelCase_ = attention_dropout
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = initializer_range
UpperCamelCase_ = scope
UpperCamelCase_ = bos_token_id
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = None
if self.use_input_mask:
UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
UpperCamelCase_ = input_mask.numpy()
UpperCamelCase_ , UpperCamelCase_ = input_mask.shape
UpperCamelCase_ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_UpperCAmelCase ):
UpperCamelCase_ = 1
UpperCamelCase_ = 0
UpperCamelCase_ = self.get_config()
return config, input_ids, tf.convert_to_tensor(_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
UpperCamelCase_ = TFBlipTextModel(config=_UpperCAmelCase )
UpperCamelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , training=_UpperCAmelCase )
UpperCamelCase_ = model(_UpperCAmelCase , training=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _a ( UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = (TFBlipTextModel,) if is_tf_available() else ()
A_ = False
A_ = False
A_ = False
def _UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase_ = BlipTextModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def _UpperCAmelCase ( self ) -> List[str]:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
def _UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> List[str]:
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCAmelCase ( self ) -> Dict:
pass
@slow
def _UpperCAmelCase ( self ) -> int:
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ = TFBlipTextModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase=True ) -> List[Any]:
super().test_pt_tf_model_equivalence(allow_missing_keys=_UpperCAmelCase )
| 23 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : int = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = "yolos"
def __init__( self : int , lowercase : List[str]=768 , lowercase : Tuple=12 , lowercase : int=12 , lowercase : int=3_072 , lowercase : Optional[int]="gelu" , lowercase : str=0.0 , lowercase : Optional[int]=0.0 , lowercase : Optional[Any]=0.02 , lowercase : List[str]=1E-12 , lowercase : Dict=[512, 864] , lowercase : Union[str, Any]=16 , lowercase : List[Any]=3 , lowercase : List[str]=True , lowercase : Optional[int]=100 , lowercase : int=True , lowercase : Dict=False , lowercase : str=1 , lowercase : int=5 , lowercase : Tuple=2 , lowercase : List[str]=5 , lowercase : Any=2 , lowercase : List[str]=0.1 , **lowercase : int , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = qkv_bias
_snake_case = num_detection_tokens
_snake_case = use_mid_position_embeddings
_snake_case = auxiliary_loss
# Hungarian matcher
_snake_case = class_cost
_snake_case = bbox_cost
_snake_case = giou_cost
# Loss coefficients
_snake_case = bbox_loss_coefficient
_snake_case = giou_loss_coefficient
_snake_case = eos_coefficient
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = version.parse("1.11" )
@property
def A ( self : str ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def A ( self : Any ):
'''simple docstring'''
return 1E-4
@property
def A ( self : List[Any] ):
'''simple docstring'''
return 12 | 686 | 0 |
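# A hedged sketch of how a dynamic-axes mapping like the one above is consumed
# by torch.onnx.export; the tiny Conv2d model is an illustrative placeholder,
# and only batch/height/width are marked dynamic here since a fixed Conv2d
# cannot accept a varying channel count.
import io

import torch

model = torch.nn.Conv2d(3, 8, kernel_size=3)
dummy = torch.randn(1, 3, 512, 864)
buffer = io.BytesIO()
torch.onnx.export(
    model,
    dummy,
    buffer,
    input_names=["pixel_values"],
    dynamic_axes={"pixel_values": {0: "batch", 2: "height", 3: "width"}},
)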
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
'''simple docstring'''
__snake_case = path_or_paths
__snake_case = split if split or isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else '''train'''
__snake_case = features
__snake_case = cache_dir
__snake_case = keep_in_memory
__snake_case = streaming
__snake_case = num_proc
__snake_case = kwargs
@abstractmethod
def lowerCAmelCase ( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
'''simple docstring'''
pass
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = features
__snake_case = cache_dir
__snake_case = keep_in_memory
__snake_case = streaming
__snake_case = num_proc
__snake_case = kwargs
@abstractmethod
def lowerCAmelCase ( self ) -> Union[Dataset, IterableDataset]:
'''simple docstring'''
pass
| 24 |
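# A hypothetical minimal sketch of the reader pattern above: a concrete
# subclass fills in read(); LinesDatasetReader is illustrative, not part of
# the datasets library.
from abc import ABC, abstractmethod


class DatasetReaderSketch(ABC):
    def __init__(self, path, split="train"):
        self.path = path
        self.split = split

    @abstractmethod
    def read(self):
        ...


class LinesDatasetReader(DatasetReaderSketch):
    def read(self):
        with open(self.path, encoding="utf-8") as f:
            return [line.rstrip("\n") for line in f]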
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_lowerCamelCase : Tuple = logging.get_logger(__name__)
# General docstring
_lowerCamelCase : Union[str, Any] = '''ResNetConfig'''
# Base docstring
_lowerCamelCase : int = '''microsoft/resnet-50'''
_lowerCamelCase : Optional[Any] = [1, 2_048, 7, 7]
# Image classification docstring
_lowerCamelCase : int = '''microsoft/resnet-50'''
_lowerCamelCase : Optional[int] = '''tiger cat'''
_lowerCamelCase : str = [
'''microsoft/resnet-50''',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int , lowercase : int , lowercase : int = 3 , lowercase : int = 1 , lowercase : str = "relu" ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Convad(
lowercase , lowercase , kernel_size=lowercase , stride=lowercase , padding=kernel_size // 2 , bias=lowercase )
_snake_case = nn.BatchNormad(lowercase )
_snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def A ( self : Union[str, Any] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = self.convolution(lowercase )
_snake_case = self.normalization(lowercase )
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : ResNetConfig ):
'''simple docstring'''
super().__init__()
_snake_case = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_snake_case = config.num_channels
def A ( self : Tuple , lowercase : Tensor ):
'''simple docstring'''
_snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
_snake_case = self.embedder(lowercase )
_snake_case = self.pooler(lowercase )
return embedding
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase : int , lowercase : int , lowercase : int = 2 ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Convad(lowercase , lowercase , kernel_size=1 , stride=lowercase , bias=lowercase )
_snake_case = nn.BatchNormad(lowercase )
def A ( self : List[str] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = self.convolution(lowercase )
_snake_case = self.normalization(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : int , lowercase : int , lowercase : int = 1 , lowercase : str = "relu" ):
'''simple docstring'''
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = (
ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , activation=lowercase ) , )
_snake_case = ACTaFN[activation]
def A ( self : List[str] , lowercase : List[str] ):
'''simple docstring'''
_snake_case = hidden_state
_snake_case = self.layer(lowercase )
_snake_case = self.shortcut(lowercase )
hidden_state += residual
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int , lowercase : int , lowercase : int = 1 , lowercase : str = "relu" , lowercase : int = 4 ):
'''simple docstring'''
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = out_channels // reduction
_snake_case = (
ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowercase , lowercase , kernel_size=1 ) , ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , kernel_size=1 , activation=lowercase ) , )
_snake_case = ACTaFN[activation]
def A ( self : Dict , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = hidden_state
_snake_case = self.layer(lowercase )
_snake_case = self.shortcut(lowercase )
hidden_state += residual
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowercase : ResNetConfig , lowercase : int , lowercase : int , lowercase : int = 2 , lowercase : int = 2 , ):
'''simple docstring'''
super().__init__()
_snake_case = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
_snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , stride=lowercase , activation=config.hidden_act ) , *[layer(lowercase , lowercase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def A ( self : List[str] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = input
for layer in self.layers:
_snake_case = layer(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : ResNetConfig ):
'''simple docstring'''
super().__init__()
_snake_case = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase , config.depths[1:] ):
self.stages.append(ResNetStage(lowercase , lowercase , lowercase , depth=lowercase ) )
def A ( self : str , lowercase : Tensor , lowercase : bool = False , lowercase : bool = True ):
'''simple docstring'''
_snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
_snake_case = stage_module(lowercase )
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowercase , hidden_states=lowercase , )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = ResNetConfig
_UpperCAmelCase : Tuple = "resnet"
_UpperCAmelCase : Optional[Any] = "pixel_values"
_UpperCAmelCase : Dict = True
def A ( self : List[str] , lowercase : Dict ):
'''simple docstring'''
if isinstance(lowercase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowercase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def A ( self : Tuple , lowercase : List[Any] , lowercase : Optional[Any]=False ):
'''simple docstring'''
if isinstance(lowercase , lowercase ):
_snake_case = value
_lowerCamelCase : str = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
_lowerCamelCase : int = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare ResNet model outputting raw features without any specific head on top." ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : Any ):
'''simple docstring'''
super().__init__(lowercase )
_snake_case = config
_snake_case = ResNetEmbeddings(lowercase )
_snake_case = ResNetEncoder(lowercase )
_snake_case = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A ( self : Union[str, Any] , lowercase : Tensor , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None ):
'''simple docstring'''
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.embedder(lowercase )
_snake_case = self.encoder(
lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = encoder_outputs[0]
_snake_case = self.pooler(lowercase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase : int ):
'''simple docstring'''
super().__init__(lowercase )
_snake_case = config.num_labels
_snake_case = ResNetModel(lowercase )
# classification head
_snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A ( self : Union[str, Any] , lowercase : Optional[torch.FloatTensor] = None , lowercase : Optional[torch.LongTensor] = None , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None , ):
'''simple docstring'''
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.resnet(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = outputs.pooler_output if return_dict else outputs[1]
_snake_case = self.classifier(lowercase )
_snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_snake_case = 'single_label_classification'
else:
_snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
_snake_case = MSELoss()
if self.num_labels == 1:
_snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_snake_case = loss_fct(lowercase , lowercase )
elif self.config.problem_type == "single_label_classification":
_snake_case = CrossEntropyLoss()
_snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_snake_case = BCEWithLogitsLoss()
_snake_case = loss_fct(lowercase , lowercase )
if not return_dict:
_snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n " ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Tuple , lowercase : Union[str, Any] ):
'''simple docstring'''
super().__init__(lowercase )
super()._init_backbone(lowercase )
_snake_case = [config.embedding_size] + config.hidden_sizes
_snake_case = ResNetEmbeddings(lowercase )
_snake_case = ResNetEncoder(lowercase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@replace_return_docstrings(output_type=lowercase , config_class=_CONFIG_FOR_DOC )
def A ( self : Dict , lowercase : Tensor , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None ):
'''simple docstring'''
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = self.embedder(lowercase )
_snake_case = self.encoder(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = outputs.hidden_states
_snake_case = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_snake_case = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowercase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowercase , ) | 686 | 0 |
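# A usage sketch for the classification head above, following the standard
# transformers image-classification flow with the microsoft/resnet-50
# checkpoint named in the docstrings.
import requests
import torch
from PIL import Image

from transformers import AutoImageProcessor, ResNetForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")

with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tiger cat"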
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =42
class _UpperCamelCase ( __A , __A ):
'''simple docstring'''
@register_to_config
def __init__( self : Any , a : int = 32 , a : int = 64 , a : int = 20 , a : int = 768 , a : int=77 , a : str=4 , a : float = 0.0 , a : str = "silu" , a : Optional[str] = None , a : Optional[str] = None , a : Optional[str] = "linear" , a : Optional[str] = "prd" , a : Optional[int] = None , a : Optional[int] = None , a : Optional[int] = None , ) -> int:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = attention_head_dim
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads * attention_head_dim
SCREAMING_SNAKE_CASE : Any = additional_embeddings
SCREAMING_SNAKE_CASE : int = time_embed_dim or inner_dim
SCREAMING_SNAKE_CASE : List[Any] = embedding_proj_dim or embedding_dim
SCREAMING_SNAKE_CASE : Any = clip_embed_dim or embedding_dim
SCREAMING_SNAKE_CASE : Any = Timesteps(a , a , 0 )
SCREAMING_SNAKE_CASE : Dict = TimestepEmbedding(a , a , out_dim=a , act_fn=a )
SCREAMING_SNAKE_CASE : str = nn.Linear(a , a )
if embedding_proj_norm_type is None:
SCREAMING_SNAKE_CASE : Dict = None
elif embedding_proj_norm_type == "layer":
SCREAMING_SNAKE_CASE : Tuple = nn.LayerNorm(a )
else:
raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" )
SCREAMING_SNAKE_CASE : str = nn.Linear(a , a )
if encoder_hid_proj_type is None:
SCREAMING_SNAKE_CASE : Tuple = None
elif encoder_hid_proj_type == "linear":
SCREAMING_SNAKE_CASE : Any = nn.Linear(a , a )
else:
raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" )
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , a ) )
if added_emb_type == "prd":
SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.zeros(1 , 1 , a ) )
elif added_emb_type is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = None
else:
raise ValueError(
F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." )
SCREAMING_SNAKE_CASE : Dict = nn.ModuleList(
[
BasicTransformerBlock(
a , a , a , dropout=a , activation_fn="gelu" , attention_bias=a , )
for d in range(a )
] )
if norm_in_type == "layer":
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.LayerNorm(a )
elif norm_in_type is None:
SCREAMING_SNAKE_CASE : Optional[int] = None
else:
raise ValueError(F"Unsupported norm_in_type: {norm_in_type}." )
SCREAMING_SNAKE_CASE : Optional[Any] = nn.LayerNorm(a )
SCREAMING_SNAKE_CASE : Tuple = nn.Linear(a , a )
SCREAMING_SNAKE_CASE : Tuple = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0000.0 )
causal_attention_mask.triu_(1 )
SCREAMING_SNAKE_CASE : Dict = causal_attention_mask[None, ...]
self.register_buffer("causal_attention_mask" , a , persistent=a )
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.zeros(1 , a ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.zeros(1 , a ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCamelCase ( self : Optional[Any] ) -> Dict[str, AttentionProcessor]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = {}
def fn_recursive_add_processors(a : str , a : torch.nn.Module , a : Dict[str, AttentionProcessor] ):
if hasattr(a , "set_processor" ):
SCREAMING_SNAKE_CASE : int = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , a , a )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(a , a , a )
return processors
def __UpperCamelCase ( self : Tuple , a : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(a , a ) and len(a ) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(a )} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(a : str , a : torch.nn.Module , a : int ):
if hasattr(a , "set_processor" ):
if not isinstance(a , a ):
module.set_processor(a )
else:
module.set_processor(processor.pop(F"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , a , a )
for name, module in self.named_children():
fn_recursive_attn_processor(a , a , a )
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
def __UpperCamelCase ( self : Optional[Any] , a : Union[str, Any] , a : Union[torch.Tensor, float, int] , a : torch.FloatTensor , a : Optional[torch.FloatTensor] = None , a : Optional[torch.BoolTensor] = None , a : bool = True , ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = hidden_states.shape[0]
SCREAMING_SNAKE_CASE : Any = timestep
if not torch.is_tensor(a ):
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(a ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE : List[str] = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
SCREAMING_SNAKE_CASE : Optional[int] = timesteps * torch.ones(a , dtype=timesteps.dtype , device=timesteps.device )
SCREAMING_SNAKE_CASE : Tuple = self.time_proj(a )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
SCREAMING_SNAKE_CASE : List[Any] = timesteps_projected.to(dtype=self.dtype )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.time_embedding(a )
if self.embedding_proj_norm is not None:
SCREAMING_SNAKE_CASE : List[Any] = self.embedding_proj_norm(a )
SCREAMING_SNAKE_CASE : str = self.embedding_proj(a )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
SCREAMING_SNAKE_CASE : Optional[int] = self.encoder_hidden_states_proj(a )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" )
SCREAMING_SNAKE_CASE : List[Any] = self.proj_in(a )
SCREAMING_SNAKE_CASE : Optional[Any] = self.positional_embedding.to(hidden_states.dtype )
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Optional[int] = 0
if encoder_hidden_states is not None:
additional_embeds.append(a )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
SCREAMING_SNAKE_CASE : Optional[int] = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
SCREAMING_SNAKE_CASE : List[Any] = hidden_states[:, None, :]
SCREAMING_SNAKE_CASE : Optional[Any] = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
SCREAMING_SNAKE_CASE : Any = self.prd_embedding.to(hidden_states.dtype ).expand(a , -1 , -1 )
additional_embeds.append(a )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat(
a , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
SCREAMING_SNAKE_CASE : str = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
SCREAMING_SNAKE_CASE : Any = F.pad(
a , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
SCREAMING_SNAKE_CASE : List[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0
            attention_mask = F.pad(attention_mask , (0, self.additional_embeddings) , value=0.0 )
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states )
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states , attention_mask=attention_mask )
        hidden_states = self.norm_out(hidden_states )
        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]
        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states )
if not return_dict:
return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding )
    def post_process_latents( self : str , prior_latents : Optional[Any] ) -> Optional[Any]:
        """simple docstring"""
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents | 25 |
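# The forward pass above normalizes `timestep` into a 1-D tensor and then
# broadcasts it with `timesteps * torch.ones(batch_size)`, which traces and
# exports cleanly to ONNX/Core ML. A minimal self-contained sketch of that
# step (the function name is illustrative, not part of the class above):
import torch

def broadcast_timesteps(timestep, batch_size: int, device) -> torch.Tensor:
    if not torch.is_tensor(timestep):
        # plain int/float -> 1-element long tensor on the right device
        timestep = torch.tensor([timestep], dtype=torch.long, device=device)
    elif timestep.ndim == 0:
        # 0-d tensor -> 1-d tensor
        timestep = timestep[None].to(device)
    # multiplying by ones broadcasts to the batch without .expand(),
    # keeping the exported graph ONNX/Core ML friendly
    return timestep * torch.ones(batch_size, dtype=timestep.dtype, device=device)

assert broadcast_timesteps(10, 4, "cpu").shape == (4,)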
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Tuple = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 | 0 |
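# For reference, the same deferred-import behaviour can be sketched with a
# PEP 562 module-level __getattr__ instead of replacing sys.modules; the
# mapping below is illustrative, not part of the transformers API.
import importlib

_SUBMODULE_BY_NAME = {
    "FocalNetConfig": "configuration_focalnet",
    "FocalNetModel": "modeling_focalnet",
}

def __getattr__(name):
    if name in _SUBMODULE_BY_NAME:
        module = importlib.import_module(f".{_SUBMODULE_BY_NAME[name]}", __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")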
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)
    def _forward_hook( self : Optional[Any] , m : Optional[int] , inputs : Tensor , outputs : Tensor ) -> None:
        """simple docstring"""
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
def __call__( self : Optional[int] , __magic_name__ : Tensor ) -> Any:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(__magic_name__ )
[x.remove() for x in self.handles]
return self
@property
    def parametrized( self : List[Any] ) -> list:
        """simple docstring"""
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True
    def __call__( self : Dict , x : Tensor ) -> None:
        """simple docstring"""
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip , dest_traced ) )
        if len(dest_traced ) != len(src_traced ) and self.raise_if_mismatch:
            raise Exception(
                f'''Numbers of operations are different. Source module has {len(src_traced )} operations while'''
                f''' destination module has {len(dest_traced )}.''' )
        for dest_m, src_m in zip(dest_traced , src_traced ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f'''Transfered from={src_m} to={dest_m}''' )
class FakeRegNetVisslWrapper( nn.Module ):
    def __init__( self : Dict , model : nn.Module ) -> None:
        """simple docstring"""
        super().__init__()
        feature_blocks: List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(("""conv1""", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("""block""" ), f'''Unexpected layer name {k}'''
            block_index = len(feature_blocks ) + 1
feature_blocks.append((f'''res{block_index}''', v) )
        self._feature_blocks = nn.ModuleDict(feature_blocks )
    def forward( self : int , x : Tensor ) -> Tuple:
        """simple docstring"""
        return get_trunk_forward_outputs(
            x , out_feat_keys=None , feature_blocks=self._feature_blocks , )
class NameToFromModelFuncMap( dict ):
    def convert_name_to_timm( self : Any , x : str ) -> str:
        """simple docstring"""
        x_split = x.split("""-""" )
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
    def __getitem__( self : int , x : str ) -> Callable[[], Tuple[nn.Module, Dict]]:
        """simple docstring"""
        if x not in self:
            x = self.convert_name_to_timm(x )
            val = partial(lambda: (timm.create_model(x , pretrained=True ).eval(), None) )
        else:
            val = super().__getitem__(x )
return val
class NameToOurModelFuncMap( dict ):
    def __getitem__( self : Optional[Any] , x : str ) -> Callable[[], nn.Module]:
        """simple docstring"""
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
return val
def manually_copy_vissl_head( from_state_dict , to_state_dict , keys ) -> Dict:
    """simple docstring"""
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
print(F'''Copied key={from_key} to={to_key}''' )
return to_state_dict
def convert_weight_and_push( name , from_model_func , our_model_func , config , save_directory , push_to_hub = True , ) -> None:
"""simple docstring"""
print(F'''Converting {name}...''' )
with torch.no_grad():
        from_model , from_state_dict = from_model_func()
        our_model = our_model_func(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model , raise_if_mismatch=False )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("""0.clf.0.weight""", """classifier.1.weight"""), ("""0.clf.0.bias""", """classifier.1.bias""")]
        to_state_dict = manually_copy_vissl_head(from_state_dict , our_model.state_dict() , keys )
        our_model.load_state_dict(to_state_dict )
        our_outputs = our_model(x , output_hidden_states=True )
        our_output = (
            our_outputs.logits if isinstance(our_model , RegNetForImageClassification ) else our_outputs.last_hidden_state
        )
        from_output = from_model(x )
        from_output = from_output[-1] if type(from_output ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
            our_output = our_outputs.hidden_states[-1]
        assert torch.allclose(from_output , our_output ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message="""Add model""" , use_temp_dir=True , )
        size = 224 if """seer""" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" , size=size )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message="""Add image processor""" , use_temp_dir=True , )
print(F'''Pushed {name}''' )
def convert_weights_and_push( save_directory , model_name = None , push_to_hub = True ):
    """simple docstring"""
    filename = """imagenet-1k-id2label.json"""
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="""dataset""" ) ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_config = {
"""regnet-x-002""": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="""x""" ),
"""regnet-x-004""": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="""x""" ),
"""regnet-x-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="""x""" ),
"""regnet-x-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="""x""" ),
"""regnet-x-016""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="""x""" ),
"""regnet-x-032""": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="""x""" ),
"""regnet-x-040""": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="""x""" ),
"""regnet-x-064""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="""x""" ),
"""regnet-x-080""": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="""x""" ),
"""regnet-x-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="""x""" ),
"""regnet-x-160""": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="""x""" ),
"""regnet-x-320""": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="""x""" ),
# y variant
"""regnet-y-002""": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"""regnet-y-004""": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"""regnet-y-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"""regnet-y-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"""regnet-y-016""": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"""regnet-y-032""": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"""regnet-y-040""": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"""regnet-y-064""": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"""regnet-y-080""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"""regnet-y-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"""regnet-y-160""": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"""regnet-y-320""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"""regnet-y-320-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"""regnet-y-640-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"""regnet-y-1280-seer""": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"""regnet-y-2560-seer""": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"""regnet-y-10b-seer""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
# finetuned on imagenet
"""regnet-y-320-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"""regnet-y-640-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"""regnet-y-1280-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"""regnet-y-2560-seer-in1k""": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"""regnet-y-10b-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(checkpoint_url , model_func ) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url , model_dir=str(save_directory ) , map_location="""cpu""" )
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["""classy_state_dict"""]["""base_model"""]["""model"""]
        state_dict = model_state_dict["""trunk"""]
        model.load_state_dict(state_dict )
        return model.eval(), model_state_dict["heads"]
# pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch""" , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch""" , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision , """https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch""" , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch""" , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=6_20.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch""" , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=6_20.83 , w_m=2.52 ) ) ) , )
if model_name:
        convert_weight_and_push(
            model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , save_directory , push_to_hub , )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , config , save_directory , push_to_hub , )
return config, expected_shape
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 26 |
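# The Tracker/ModuleTransfer pair above implements weight transfer between two
# architecturally identical networks: forward hooks record the leaf modules in
# execution order, then weights are copied pairwise. A minimal self-contained
# sketch of the same idea (layer shapes are illustrative):
import torch
import torch.nn as nn

def trace_leaves(model: nn.Module, x: torch.Tensor) -> list:
    traced, handles = [], []
    def hook(module, _inputs, _outputs):
        if len(list(module.children())) == 0:  # leaf module
            traced.append(module)
    for m in model.modules():
        handles.append(m.register_forward_hook(hook))
    model(x)
    for h in handles:
        h.remove()
    # keep only modules that actually carry weights
    return [m for m in traced if len(m.state_dict()) > 0]

src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 8, 3))
dst = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 8, 3))
x = torch.randn(1, 3, 32, 32)
for d, s in zip(trace_leaves(dst, x), trace_leaves(src, x)):
    d.load_state_dict(s.state_dict())
assert torch.allclose(src(x), dst(x))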
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
def get_deta_config( model_name ):
    backbone_config = SwinConfig(
        embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
    config = DetaConfig(
        backbone_config=backbone_config , num_queries=900 , encoder_ffn_dim=2_048 , decoder_ffn_dim=2_048 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
# set labels
    repo_id = 'huggingface/label-files'
    if "o365" in model_name:
        num_labels = 366
        filename = 'object365-id2label.json'
    else:
        num_labels = 91
        filename = 'coco-detection-id2label.json'
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
return config
def create_rename_keys( config ):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v( state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
            in_proj_bias = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[: dim]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[dim : dim * 2]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[-dim :, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim :]
# fmt: on
def read_in_decoder_q_k_v( state_dict , config ):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''model.decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:hidden_size, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:hidden_size]
        state_dict[f'''model.decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'''model.decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-hidden_size:, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deta_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ):
    config = get_deta_config(model_name )
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
    else:
        raise ValueError(f'''Model name {model_name} not supported''' )
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
# original state dict
for name, param in state_dict.items():
print(__lowercase , param.shape )
# rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
# fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key )
            state_dict["model." + key] = val
        if "input_proj" in key:
            val = state_dict.pop(key )
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key )
            state_dict["model." + key] = val
# finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device )
    # load image processor
    processor = DetaImageProcessor(format='coco_detection' )
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values.to(device ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
_snake_case = torch.tensor(
[[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
_snake_case = torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
elif model_name == "deta-swin-large-o365":
_snake_case = torch.tensor(
[[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
_snake_case = torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(device ) , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(device ) , atol=1E-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(f'''jozhang97/{model_name}''' )
processor.push_to_hub(f'''jozhang97/{model_name}''' )
if __name__ == "__main__":
_lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 686 | 0 |
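# read_in_swin_q_k_v above splits a fused qkv projection into separate
# query/key/value tensors; the slicing convention is easy to verify in
# isolation (the dimension below is illustrative):
import torch

dim = 192
in_proj_weight = torch.randn(3 * dim, dim)  # rows stacked as [q; k; v]
in_proj_bias = torch.randn(3 * dim)

q_w, k_w, v_w = in_proj_weight[:dim], in_proj_weight[dim : 2 * dim], in_proj_weight[-dim:]
q_b, k_b, v_b = in_proj_bias[:dim], in_proj_bias[dim : 2 * dim], in_proj_bias[-dim:]

assert q_w.shape == k_w.shape == v_w.shape == (dim, dim)
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)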
from __future__ import annotations
def depth_first_search( graph: dict , start: str ) -> set[str]:
    """simple docstring"""
    explored, stack = set(start ), [start]
    while stack:
        v = stack.pop()
        explored.add(v )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
                stack.append(adj )
return explored
G = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
| 27 |
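# For contrast with the iterative depth_first_search above, a minimal BFS
# sketch: pop from the left of a queue and mark nodes as they are enqueued.
from collections import deque

def breadth_first_search(graph: dict, start: str) -> set:
    explored, queue = {start}, deque([start])
    while queue:
        v = queue.popleft()
        for adj in graph[v]:
            if adj not in explored:
                explored.add(adj)
                queue.append(adj)
    return explored

assert breadth_first_search({"A": ["B"], "B": ["A"]}, "A") == {"A", "B"}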
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_lowerCamelCase : Dict = '''pt'''
elif is_tf_available():
_lowerCamelCase : List[str] = '''tf'''
else:
_lowerCamelCase : List[Any] = '''jax'''
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = PerceiverTokenizer
_UpperCAmelCase : Optional[int] = False
    def setUp( self : Tuple ):
        '''simple docstring'''
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def perceiver_tokenizer( self : str ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
    def get_tokenizer( self : Optional[int] , **kwargs : Dict ):
        '''simple docstring'''
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_clean_sequence( self : Optional[int] , tokenizer : Tuple , with_prefix_space : Optional[Any]=False , max_length : int=20 , min_length : Optional[int]=5 ) -> Tuple[str, list]:
        '''simple docstring'''
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        toks = list(filter(lambda t : re.match(R'^[ a-zA-Z]+$' , t[1] ) , toks ) )
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + ' '
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = 'Unicode €.'
_snake_case = tokenizer(lowercase )
_snake_case = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , lowercase )
# decoding
_snake_case = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '[CLS]Unicode €.[SEP]' )
_snake_case = tokenizer('e è é ê ë' )
_snake_case = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , lowercase )
# decoding
_snake_case = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_snake_case = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
_snake_case = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
self.assertIsInstance(lowercase , lowercase )
if FRAMEWORK != "jax":
_snake_case = list(batch.input_ids.numpy()[0] )
else:
_snake_case = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowercase , lowercase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_snake_case = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , lowercase )
self.assertIn('attention_mask' , lowercase )
self.assertNotIn('decoder_input_ids' , lowercase )
self.assertNotIn('decoder_attention_mask' , lowercase )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = [
'Summary of the text.',
'Another summary.',
]
_snake_case = tokenizer(
text_target=lowercase , max_length=32 , padding='max_length' , truncation=lowercase , return_tensors=lowercase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_snake_case = tokenizer.__class__.from_pretrained(lowercase )
_snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
shutil.rmtree(lowercase )
_snake_case = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_snake_case = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_snake_case = tokenizer.__class__.from_pretrained(lowercase )
_snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_snake_case = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowercase )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase )
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowercase )
_snake_case = [f'''<extra_id_{i}>''' for i in range(125 )]
_snake_case = added_tokens_extra_ids + [
'an_additional_special_token'
]
_snake_case = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case = tokenizer_class.from_pretrained(
lowercase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=lowercase )]
_snake_case = tokenizer_class.from_pretrained(
lowercase , additional_special_tokens=lowercase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def A ( self : Dict ):
'''simple docstring'''
pass
def A ( self : Optional[int] ):
'''simple docstring'''
pass
def A ( self : List[str] ):
'''simple docstring'''
pass
def A ( self : Dict ):
'''simple docstring'''
pass
def A ( self : int ):
'''simple docstring'''
_snake_case = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_snake_case = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
_snake_case = tokenizer.convert_tokens_to_string(lowercase )
self.assertIsInstance(lowercase , lowercase ) | 686 | 0 |
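# The expected ids in the tests above are consistent with a plain UTF-8 byte
# tokenizer whose ids are shifted by the number of special tokens (assumed to
# be 6 here: ord('U') + 6 == 91, and the Euro sign's UTF-8 bytes 226/130/172
# appear as 232/136/178). A minimal sketch of that mapping:
NUM_SPECIAL_TOKENS = 6  # assumption for illustration

def byte_encode(text: str) -> list:
    return [b + NUM_SPECIAL_TOKENS for b in text.encode("utf-8")]

def byte_decode(ids: list) -> str:
    return bytes(i - NUM_SPECIAL_TOKENS for i in ids).decode("utf-8", errors="replace")

assert byte_encode("Unicode €.") == [91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52]
assert byte_decode(byte_encode("e è é ê ë")) == "e è é ê ë"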
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class _a ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(A ):
SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(A )
self.assertIsNotNone(A )
self.assertIsInstance(A, A )
SCREAMING_SNAKE_CASE : Tuple = FlaxAutoModel.from_pretrained(A )
self.assertIsNotNone(A )
self.assertIsInstance(A, A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(A ):
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(A )
self.assertIsNotNone(A )
self.assertIsInstance(A, A )
SCREAMING_SNAKE_CASE : str = FlaxAutoModel.from_pretrained(A )
self.assertIsNotNone(A )
self.assertIsInstance(A, A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained(A )
SCREAMING_SNAKE_CASE : List[str] = FlaxBertModel.from_pretrained(A )
SCREAMING_SNAKE_CASE : List[str] = tokenizer('Do you support jax jitted function?', return_tensors=TensorType.JAX )
@jax.jit
def eval(**A ):
return model(**A )
eval(**A ).block_until_ready()
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(A )
SCREAMING_SNAKE_CASE : int = FlaxRobertaModel.from_pretrained(A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer('Do you support jax jitted function?', return_tensors=TensorType.JAX )
@jax.jit
def eval(**A ):
return model(**A )
eval(**A ).block_until_ready()
def UpperCamelCase_ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
A, 'bert-base is not a local folder and is not a valid model identifier' ):
SCREAMING_SNAKE_CASE : Optional[int] = FlaxAutoModel.from_pretrained('bert-base' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
A, r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxAutoModel.from_pretrained(A, revision='aaaaaa' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
A, 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack', ):
SCREAMING_SNAKE_CASE : Optional[int] = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(A, 'Use `from_pt=True` to load this model' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
| 28 |
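# The tests above repeatedly wrap the model call in jax.jit and then call
# block_until_ready(): jax dispatches computations asynchronously, so an
# explicit synchronization is needed before timing or asserting on results.
# A tiny standalone sketch of the same pattern:
import jax
import jax.numpy as jnp

@jax.jit
def affine(w, x, b):
    return w @ x + b

out = affine(jnp.eye(2), jnp.ones(2), jnp.zeros(2))
out.block_until_ready()  # wait for the async computation to finish
assert out.shape == (2,)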
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    num_nodes , num_edges = 9, 14 # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjancency = defaultdict(list )
    for nodea, nodeb, cost in edges:
        adjancency[nodea].append([nodeb, cost] )
        adjancency[nodeb].append([nodea, cost] )
    result = mst(adjancency )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
assert edge in result or reverse in result | 686 | 0 |
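# A minimal heap-based sketch of Prim's algorithm over the same kind of
# [neighbour, cost] adjacency lists the test above builds (the helper name is
# illustrative; the imported prisms_algorithm is the implementation under test):
import heapq
from collections import defaultdict

def prim_mst_sketch(adjacency: dict, start=0) -> list:
    visited, tree_edges = {start}, []
    heap = [(cost, start, nxt) for nxt, cost in adjacency[start]]
    heapq.heapify(heap)
    while heap:
        cost, frm, to = heapq.heappop(heap)
        if to in visited:
            continue
        visited.add(to)
        tree_edges.append((frm, to, cost))
        for nxt, c in adjacency[to]:
            if nxt not in visited:
                heapq.heappush(heap, (c, to, nxt))
    return tree_edges

demo = defaultdict(list)
for a, b, c in [(0, 1, 4), (0, 2, 1), (1, 2, 2)]:
    demo[a].append([b, c])
    demo[b].append([a, c])
assert sorted(e[2] for e in prim_mst_sketch(demo)) == [1, 2]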
"""simple docstring"""
def power( base: int , exponent: int ) -> float:
    return base * power(base , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print("""Raise base to the power of exponent using recursion...""")
    base = int(input("""Enter the base: """).strip())
    exponent = int(input("""Enter the exponent: """).strip())
    result = power(base, abs(exponent))
    if exponent < 0: # power() does not properly deal w/ negative exponents
        result = 1 / result
print(f"{base} to the power of {exponent} is {result}")
| 29 |
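# A hedged alternative sketch: iterative binary exponentiation reaches the
# same result in O(log n) multiplications instead of O(n) recursive calls.
def fast_power(base: float, exponent: int) -> float:
    result, e = 1.0, abs(exponent)
    while e:
        if e & 1:  # multiply in the current square when the bit is set
            result *= base
        base *= base
        e >>= 1
    return result if exponent >= 0 else 1 / result

assert fast_power(2, 10) == 1024
assert fast_power(2, -2) == 0.25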
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Tuple = ["transformers", "torch", "note_seq"]
    def __init__( self : List[Any] , *args : List[Any] , **kwargs : Dict ):
        '''simple docstring'''
        requires_backends(self , ['transformers', 'torch', 'note_seq'] )
    @classmethod
    def from_config( cls : Union[str, Any] , *args : List[str] , **kwargs : Any ):
        '''simple docstring'''
        requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
    @classmethod
    def from_pretrained( cls : Union[str, Any] , *args : List[str] , **kwargs : List[Any] ):
        '''simple docstring'''
        requires_backends(cls , ['transformers', 'torch', 'note_seq'] ) | 686 | 0 |
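# The DummyObject/requires_backends pattern above lets imports succeed while
# deferring the failure to first use. A minimal standalone sketch of the idea
# (the class name is illustrative):
class _DummyWithBackends:
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        raise ImportError(
            f"This object requires the following backends: {', '.join(self._backends)}"
        )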
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__a = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
__a = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
__a = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a( datasets.Metric ):
"""simple docstring"""
    def _info( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) ,homepage='''https://github.com/hendrycks/math''' ,codebase_urls=['''https://github.com/hendrycks/math'''] ,)
    def _compute( self ,predictions ,references ) -> Dict:
        n_correct = 0.0
        for i, j in zip(predictions ,references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i ,j ) else 0.0
        accuracy = n_correct / len(predictions )
return {
"accuracy": accuracy,
} | 30 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout() -> None:
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request('GET' , 'https://huggingface.co' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error() -> None:
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET' , 'https://huggingface.co' )
def test_offline_with_datasets_offline_mode_enabled() -> None:
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head('https://huggingface.co' ) | 686 | 0 |
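# Independent of the datasets test helper used above, an offline simulation
# can be sketched with unittest.mock by making every HTTP request raise:
from unittest.mock import patch
import requests

def _refuse_request(*args, **kwargs):
    raise requests.exceptions.ConnectionError("Offline mode is enabled.")

with patch("requests.Session.request", _refuse_request):
    try:
        requests.get("https://huggingface.co")
    except requests.exceptions.ConnectionError as err:
        print(f"Blocked as expected: {err}")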
def solution( length : int = 50 ) -> int:
    ways_number = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''') | 31 |
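# The solver above counts tilings of a length-`length` row with unit tiles
# plus coloured tiles of length 2-4 (the setup of Project Euler 117). An
# equivalent top-down sketch of the same recurrence, fixing the first
# coloured tile and counting the remainder recursively:
from functools import lru_cache

@lru_cache(maxsize=None)
def ways(n: int) -> int:
    total = 1  # the all-unit-tile row
    for tile_length in range(2, 5):
        for tile_start in range(n - tile_length + 1):
            total += ways(n - tile_start - tile_length)
    return total

assert ways(5) == 15  # matches the worked example for a row of length five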
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCamelCase : Optional[int] = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_lowerCamelCase : List[str] = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_lowerCamelCase : Dict = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def A ( self : Union[str, Any] , lowercase : Tuple , lowercase : Optional[Any] , lowercase : int=None , lowercase : str=True , lowercase : List[str]=False ):
'''simple docstring'''
if rouge_types is None:
_snake_case = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
_snake_case = rouge_scorer.RougeScorer(rouge_types=lowercase , use_stemmer=lowercase )
if use_aggregator:
_snake_case = scoring.BootstrapAggregator()
else:
_snake_case = []
for ref, pred in zip(lowercase , lowercase ):
_snake_case = scorer.score(lowercase , lowercase )
if use_aggregator:
aggregator.add_scores(lowercase )
else:
scores.append(lowercase )
if use_aggregator:
_snake_case = aggregator.aggregate()
else:
_snake_case = {}
for key in scores[0]:
_snake_case = [score[key] for score in scores]
return result | 686 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester :
def __init__( self , _UpperCamelCase , _UpperCamelCase=2 , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=10 , _UpperCamelCase=3 , _UpperCamelCase=32 * 4 , _UpperCamelCase=32 * 6 , _UpperCamelCase=4 , _UpperCamelCase=32 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = is_training
_UpperCAmelCase = use_auxiliary_loss
_UpperCAmelCase = num_queries
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_size
_UpperCAmelCase = max_size
_UpperCAmelCase = num_labels
_UpperCAmelCase = mask_feature_size
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config( self ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
    def prepare_config_and_inputs_for_common( self ):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
        return config, inputs_dict
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = output.encoder_hidden_states
_UpperCAmelCase = output.pixel_decoder_hidden_states
_UpperCAmelCase = output.transformer_decoder_hidden_states
        self.parent.assertEqual(len(_UpperCamelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(_UpperCamelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(_UpperCamelCase ) , config.decoder_config.decoder_layers )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ):
with torch.no_grad():
_UpperCAmelCase = MaskFormerModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(pixel_values=_UpperCamelCase , pixel_mask=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , output_hidden_states=_UpperCamelCase )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = MaskFormerForInstanceSegmentation(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
def comm_check_on_output(_UpperCamelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCAmelCase = model(pixel_values=_UpperCamelCase , pixel_mask=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase )
comm_check_on_output(_UpperCamelCase )
_UpperCAmelCase = model(
pixel_values=_UpperCamelCase , pixel_mask=_UpperCamelCase , mask_labels=_UpperCamelCase , class_labels=_UpperCamelCase )
comm_check_on_output(_UpperCamelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __UpperCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp( self ):
        self.model_tester = MaskFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskFormerConfig , has_text_modality=False )
def UpperCamelCase( self ):
self.config_tester.run_common_tests()
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_UpperCamelCase , **_UpperCamelCase , output_hidden_states=_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_UpperCamelCase )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def UpperCamelCase( self ):
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def UpperCamelCase( self ):
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def UpperCamelCase( self ):
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def UpperCamelCase( self ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def UpperCamelCase( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCamelCase( self ):
pass
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
@slow
def UpperCamelCase( self ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
_UpperCAmelCase = MaskFormerModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = (self.model_tester.min_size,) * 2
_UpperCAmelCase = {
'''pixel_values''': torch.randn((2, 3, *size) , device=_UpperCamelCase ),
'''mask_labels''': torch.randn((2, 10, *size) , device=_UpperCamelCase ),
'''class_labels''': torch.zeros(2 , 10 , device=_UpperCamelCase ).long(),
}
_UpperCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_UpperCamelCase )
_UpperCAmelCase = model(**_UpperCamelCase )
self.assertTrue(outputs.loss is not None )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_UpperCamelCase , **_UpperCamelCase , output_hidden_states=_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase ).to(_UpperCamelCase )
_UpperCAmelCase = model(**_UpperCamelCase , output_attentions=_UpperCamelCase )
self.assertTrue(outputs.attentions is not None )
def UpperCamelCase( self ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_UpperCAmelCase = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.train()
_UpperCAmelCase = model(_UpperCamelCase , mask_labels=_UpperCamelCase , class_labels=_UpperCamelCase ).loss
loss.backward()
def UpperCamelCase( self ):
# only MaskFormerForInstanceSegmentation has the loss
_UpperCAmelCase = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.train()
_UpperCAmelCase = model(_UpperCamelCase , mask_labels=_UpperCamelCase , class_labels=_UpperCamelCase )
_UpperCAmelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_UpperCAmelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_UpperCamelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCAmelCase_ = 1e-4
def prepare_img( ):
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_vision
@slow
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def UpperCamelCase( self ):
_UpperCAmelCase = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(_UpperCamelCase )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase )
_UpperCAmelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_UpperCAmelCase = model(**_UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(_UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
_UpperCAmelCase = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(_UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
_UpperCAmelCase = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(_UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
def UpperCamelCase( self ):
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_UpperCamelCase )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase )
_UpperCAmelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_UpperCAmelCase = model(**_UpperCamelCase )
# masks_queries_logits
_UpperCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_UpperCAmelCase = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
_UpperCAmelCase = torch.tensor(_UpperCamelCase ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
# class_queries_logits
_UpperCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase = torch.tensor(
[
[1.6512e00, -5.2572e00, -3.3519e00],
[3.6169e-02, -5.9025e00, -2.9313e00],
[1.0766e-04, -7.7630e00, -5.1263e00],
] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
def UpperCamelCase( self ):
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(_UpperCamelCase )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase )
_UpperCAmelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_UpperCAmelCase = model(**_UpperCamelCase )
# masks_queries_logits
_UpperCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_UpperCAmelCase = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
_UpperCAmelCase = torch.tensor(_UpperCamelCase ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
# class_queries_logits
_UpperCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
def UpperCamelCase( self ):
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_UpperCamelCase )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
_UpperCAmelCase = inputs['''pixel_values'''].to(_UpperCamelCase )
_UpperCAmelCase = [el.to(_UpperCamelCase ) for el in inputs['''mask_labels''']]
_UpperCAmelCase = [el.to(_UpperCamelCase ) for el in inputs['''class_labels''']]
with torch.no_grad():
_UpperCAmelCase = model(**_UpperCamelCase )
self.assertTrue(outputs.loss is not None ) | 32 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
'''simple docstring'''
_UpperCAmelCase : Dict = "swin2sr"
_UpperCAmelCase : Optional[int] = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Optional[int] , lowercase : List[Any]=64 , lowercase : int=1 , lowercase : Union[str, Any]=3 , lowercase : Dict=180 , lowercase : List[Any]=[6, 6, 6, 6, 6, 6] , lowercase : Dict=[6, 6, 6, 6, 6, 6] , lowercase : List[Any]=8 , lowercase : List[str]=2.0 , lowercase : Tuple=True , lowercase : Union[str, Any]=0.0 , lowercase : Dict=0.0 , lowercase : Optional[int]=0.1 , lowercase : int="gelu" , lowercase : List[str]=False , lowercase : List[Any]=0.02 , lowercase : List[Any]=1E-5 , lowercase : Optional[int]=2 , lowercase : Tuple=1.0 , lowercase : List[Any]="1conv" , lowercase : List[Any]="pixelshuffle" , **lowercase : List[str] , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = embed_dim
_snake_case = depths
_snake_case = len(lowercase )
_snake_case = num_heads
_snake_case = window_size
_snake_case = mlp_ratio
_snake_case = qkv_bias
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = drop_path_rate
_snake_case = hidden_act
_snake_case = use_absolute_embeddings
_snake_case = layer_norm_eps
_snake_case = initializer_range
_snake_case = upscale
_snake_case = img_range
_snake_case = resi_connection
_snake_case = upsampler | 686 | 0 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __magic_name__ (OnnxPipelineTesterMixin ,unittest.TestCase ):
'''simple docstring'''
    hub_checkpoint = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
    def get_dummy_inputs( self:Optional[int] , _a:Optional[int]=0 ):
        image = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(_a ) )
        generator = np.random.RandomState(_a )
snake_case__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.75,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = self.get_dummy_inputs()
snake_case__ = pipe(**_a ).images
snake_case__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_28, 1_28, 3)
snake_case__ = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
snake_case__ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_a )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = self.get_dummy_inputs()
snake_case__ = pipe(**_a ).images
snake_case__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
snake_case__ = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
snake_case__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_a )
# warmup pass to apply optimizations
snake_case__ = pipe(**self.get_dummy_inputs() )
snake_case__ = self.get_dummy_inputs()
snake_case__ = pipe(**_a ).images
snake_case__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
snake_case__ = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
snake_case__ = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = self.get_dummy_inputs()
snake_case__ = pipe(**_a ).images
snake_case__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
snake_case__ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
snake_case__ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = self.get_dummy_inputs()
snake_case__ = pipe(**_a ).images
snake_case__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
snake_case__ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
snake_case__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = self.get_dummy_inputs()
snake_case__ = pipe(**_a ).images
snake_case__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
snake_case__ = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
@property
    def gpu_provider( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
snake_case__ = init_image.resize((7_68, 5_12) )
# using the PNDM scheduler by default
snake_case__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = '''A fantasy landscape, trending on artstation'''
snake_case__ = np.random.RandomState(0 )
snake_case__ = pipe(
prompt=_a , image=_a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=_a , output_type='''np''' , )
snake_case__ = output.images
snake_case__ = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
snake_case__ = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
snake_case__ = init_image.resize((7_68, 5_12) )
snake_case__ = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
snake_case__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=_a , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = '''A fantasy landscape, trending on artstation'''
snake_case__ = np.random.RandomState(0 )
snake_case__ = pipe(
prompt=_a , image=_a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=_a , output_type='''np''' , )
snake_case__ = output.images
snake_case__ = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
snake_case__ = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 33 |
import random
def partition(a: list , left_index: int , right_index: int ) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random(a: list , left: int , right: int ) -> None:
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        ) # switches the pivot with the left most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index ) # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right ) # recursive quicksort to the right of the pivot point
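# Minimal usage sketch (illustrative, not part of the original module): the sort
# is in-place, so we check the mutated list directly. The random pivot choice
# changes the recursion path but never the final result.
def _demo() -> None:
    data = [3, 1, 4, 1, 5, 9, 2, 6]
    quick_sort_random(data , 0 , len(data ) )
    assert data == [1, 1, 2, 3, 4, 5, 6, 9]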
def main() -> None:
    user_input = input('Enter numbers separated by a comma:\n' ).strip()
    unsorted = [int(item ) for item in user_input.split(',' )]
    quick_sort_random(unsorted , 0 , len(unsorted ) )
    print(unsorted )
if __name__ == "__main__":
main() | 686 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class snake_case_ ( TaskTemplate ):
"""simple docstring"""
    task: str = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema: ClassVar[Features] = Features({'''text''': Value('''string''' )} )
    label_schema: ClassVar[Features] = Features({'''summary''': Value('''string''' )} )
    text_column: str = "text"
    summary_column: str = "summary"
@property
    def column_mapping( self ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"} | 34 |
import math
def is_prime(number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1 ) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
    return j
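# Worked illustration (added for clarity): the spiral layer with side length
# j + 2 has diagonal corners (j + 2) ** 2 - k * (j + 1) for k = 0, 1, 2, 3, and
# the perfect square (k = 0) is never prime, so the inner loop above visits only
# the other three. With j = 3 it checks 13, 17 and 21 -- the corners of the 5x5 layer.
def _layer_corners(j: int ) -> list:
    return list(range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ) )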
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 | 0 |
def bin_to_octal(bin_string: str ) -> str:
    '''simple docstring'''
    if not all(char in '''01''' for char in bin_string ):
        raise ValueError('''Non-binary value was passed to the function''' )
    if not bin_string:
        raise ValueError('''Empty string was passed to the function''' )
    oct_string = ''''''
    while len(bin_string ) % 3 != 0:
        bin_string = '''0''' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string ) )
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group ):
            oct_val += int(2 ** (2 - index) * int(val ) )
        oct_string += str(oct_val )
    return oct_string
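# Cross-check against Python's built-in conversion (illustrative helper): the
# octal string produced above must denote the same value as the binary input,
# e.g. bin_to_octal("1111") == "17" and int("17", 8) == int("1111", 2) == 15.
def _check_against_builtin(bin_string: str ) -> None:
    assert int(bin_to_octal(bin_string ) , 8 ) == int(bin_string , 2 )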
if __name__ == "__main__":
from doctest import testmod
testmod()
| 35 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( BackboneConfigMixin ,PretrainedConfig ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = "resnet"
_UpperCAmelCase : Any = ["basic", "bottleneck"]
def __init__( self : Union[str, Any] , lowercase : Dict=3 , lowercase : Any=64 , lowercase : Any=[256, 512, 1_024, 2_048] , lowercase : Dict=[3, 4, 6, 3] , lowercase : Any="bottleneck" , lowercase : Optional[Any]="relu" , lowercase : Dict=False , lowercase : str=None , lowercase : Tuple=None , **lowercase : List[Any] , ):
'''simple docstring'''
super().__init__(**lowercase )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
_snake_case = num_channels
_snake_case = embedding_size
_snake_case = hidden_sizes
_snake_case = depths
_snake_case = layer_type
_snake_case = hidden_act
_snake_case = downsample_in_first_stage
_snake_case = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(lowercase ) + 1 )]
_snake_case , _snake_case = get_aligned_output_features_output_indices(
out_features=lowercase , out_indices=lowercase , stage_names=self.stage_names )
class SCREAMING_SNAKE_CASE__ ( OnnxConfig ):
'''simple docstring'''
_UpperCAmelCase : Any = version.parse("1.11" )
@property
    def inputs( self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
    def atol_for_validation( self ):
'''simple docstring'''
return 1E-3 | 686 | 0 |
import warnings
from ..trainer import Trainer
from ..utils import logging
__lowercase : str = logging.get_logger(__name__)
class _A ( Trainer ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_=None ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" ,SCREAMING_SNAKE_CASE_ ,)
super().__init__(args=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
| 36 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : List[Any] , lowercase : Union[str, Any] , lowercase : int ):
'''simple docstring'''
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(lowercase ) for s in shape] )}.npy'''
def A ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
def A ( self : List[Any] , lowercase : Tuple=0 , lowercase : Optional[int]=(4, 4, 64, 64) , lowercase : Optional[int]=False ):
'''simple docstring'''
_snake_case = jnp.bfloataa if fpaa else jnp.floataa
_snake_case = jnp.array(load_hf_numpy(self.get_file_format(lowercase , lowercase ) ) , dtype=lowercase )
return image
def A ( self : Tuple , lowercase : Any=False , lowercase : Union[str, Any]="CompVis/stable-diffusion-v1-4" ):
'''simple docstring'''
_snake_case = jnp.bfloataa if fpaa else jnp.floataa
_snake_case = 'bf16' if fpaa else None
_snake_case , _snake_case = FlaxUNetaDConditionModel.from_pretrained(
lowercase , subfolder='unet' , dtype=lowercase , revision=lowercase )
return model, params
def A ( self : Union[str, Any] , lowercase : str=0 , lowercase : Optional[Any]=(4, 77, 768) , lowercase : int=False ):
'''simple docstring'''
_snake_case = jnp.bfloataa if fpaa else jnp.floataa
_snake_case = jnp.array(load_hf_numpy(self.get_file_format(lowercase , lowercase ) ) , dtype=lowercase )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def A ( self : Tuple , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : List[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=lowercase )
_snake_case = self.get_latents(lowercase , fpaa=lowercase )
_snake_case = self.get_encoder_hidden_states(lowercase , fpaa=lowercase )
_snake_case = model.apply(
{'params': params} , lowercase , jnp.array(lowercase , dtype=jnp.intaa ) , encoder_hidden_states=lowercase , ).sample
assert sample.shape == latents.shape
_snake_case = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_snake_case = jnp.array(lowercase , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowercase , lowercase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def A ( self : str , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : List[str] ):
'''simple docstring'''
_snake_case , _snake_case = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=lowercase )
_snake_case = self.get_latents(lowercase , shape=(4, 4, 96, 96) , fpaa=lowercase )
_snake_case = self.get_encoder_hidden_states(lowercase , shape=(4, 77, 1_024) , fpaa=lowercase )
_snake_case = model.apply(
{'params': params} , lowercase , jnp.array(lowercase , dtype=jnp.intaa ) , encoder_hidden_states=lowercase , ).sample
assert sample.shape == latents.shape
_snake_case = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_snake_case = jnp.array(lowercase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowercase , lowercase , atol=1E-2 ) | 686 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : str = {
"""xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
"""xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
"""xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
"""xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
"""xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
"""xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
"""xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
"""xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}
class A__ ( PretrainedConfig ):
"""simple docstring"""
    model_type = 'xlm'
    attribute_map = {
        'hidden_size': 'emb_dim',
        'num_attention_heads': 'n_heads',
        'num_hidden_layers': 'n_layers',
        'n_words': 'vocab_size', # For backward compatibility
    }
def __init__( self : Tuple , lowerCamelCase__ : List[Any]=30_145 , lowerCamelCase__ : Any=2_048 , lowerCamelCase__ : int=12 , lowerCamelCase__ : Dict=16 , lowerCamelCase__ : Optional[int]=0.1 , lowerCamelCase__ : Any=0.1 , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : Dict=False , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : List[str]=1 , lowerCamelCase__ : str=True , lowerCamelCase__ : str=512 , lowerCamelCase__ : List[str]=2_048**-0.5 , lowerCamelCase__ : List[str]=1E-12 , lowerCamelCase__ : Any=0.02 , lowerCamelCase__ : Tuple=0 , lowerCamelCase__ : Any=1 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Optional[Any]=5 , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : Optional[Any]="first" , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : Tuple=None , lowerCamelCase__ : int=True , lowerCamelCase__ : Dict=0.1 , lowerCamelCase__ : Tuple=5 , lowerCamelCase__ : Any=5 , lowerCamelCase__ : List[Any]=0 , lowerCamelCase__ : Optional[int]=0 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Tuple=0 , **lowerCamelCase__ : List[str] , ):
a__ : List[str] = vocab_size
a__ : str = emb_dim
a__ : List[str] = n_layers
a__ : Union[str, Any] = n_heads
a__ : Any = dropout
a__ : int = attention_dropout
a__ : List[str] = gelu_activation
a__ : Optional[Any] = sinusoidal_embeddings
a__ : Optional[int] = causal
a__ : Optional[int] = asm
a__ : int = n_langs
a__ : Tuple = use_lang_emb
a__ : str = layer_norm_eps
a__ : Tuple = bos_index
a__ : int = eos_index
a__ : Optional[int] = pad_index
a__ : List[str] = unk_index
a__ : List[Any] = mask_index
a__ : List[str] = is_encoder
a__ : List[Any] = max_position_embeddings
a__ : Union[str, Any] = embed_init_std
a__ : Tuple = init_std
a__ : List[str] = summary_type
a__ : List[str] = summary_use_proj
a__ : Dict = summary_activation
a__ : str = summary_proj_to_labels
a__ : Dict = summary_first_dropout
a__ : List[str] = start_n_top
a__ : List[Any] = end_n_top
a__ : Dict = mask_token_id
a__ : Optional[Any] = lang_id
if "n_words" in kwargs:
a__ : Tuple = kwargs["n_words"]
super().__init__(pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
class A__ ( OnnxConfig ):
"""simple docstring"""
@property
    def inputs( self ):
if self.task == "multiple-choice":
a__ : str = {0: "batch", 1: "choice", 2: "sequence"}
else:
a__ : Any = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 37 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict ) -> None:
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb ) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict , expert_idx=None ) -> dict:
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace('moe_layer.experts.0' , f'''ffn.experts.expert_{expert_idx}''' )
            else:
                key = key.replace('moe_layer.experts.' , 'ffn.experts.expert_' )
        if "gate" in key:
            key = key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier' )
        if "fc2" in key and "experts" not in key:
            key = key.replace('.fc2.' , '.ffn.fc2.' )
        if "fc1" in key and "experts" not in key:
            key = key.replace('.fc1.' , '.ffn.fc1.' )
        if ".encoder_attn." in key:
            key = key.replace('.encoder_attn.' , '.cross_attention.' )
        if "encoder_attn_layer_norm" in key:
            key = key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm' )
        if "final_layer_norm" in key:
            key = key.replace('final_layer_norm' , 'ff_layer_norm' )
        new_dict[key] = state_dict[old_key]
    return new_dict
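# Illustrative check of the mapping above (the key name here is hypothetical,
# not taken from a real checkpoint): a fairseq-style router key is rewritten
# into the Hugging Face NLLB-MoE layout.
def _rename_example() -> None:
    renamed = rename_fairseq_keys({'layers.0.moe_layer.gate.wg.weight': 0} , None )
    assert 'layers.0.ffn.router.classifier.weight' in renamed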
def shard_on_the_fly(switch_checkpoint_path , dump_path , num_experts , dtype , weights_name : str = WEIGHTS_NAME ):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    for expert in range(num_experts ):
        expert_path = switch_checkpoint_path + f'''-rank-{expert}.pt'''
        if os.path.isfile(expert_path ):
            expert_state = torch.load(expert_path )['model']
            remove_ignore_keys_(expert_state )
            expert_state = rename_fairseq_keys(expert_state , expert )
            save_path = os.path.join(
                dump_path , weights_name.replace('.bin' , f'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
            torch.save(expert_state , save_path )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(expert_state )[0]].dtype )
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace('.bin' , f'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
    shared_weights = torch.load(switch_checkpoint_path + '-shared.pt' )['model']
    remove_ignore_keys_(shared_weights )
    shared_weights = rename_fairseq_keys(shared_weights , None )
    shared_weights['shared.weight'] = shared_weights['decoder.embed_tokens.weight']
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts ) == 1:
        save_path = os.path.join(dump_path , weights_name )
        torch.save(shared_weights , save_path )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights , save_path )
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace('.bin' , f'''-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin''' )
        temp_filename = os.path.join(dump_path , weights_name.replace('.bin' , f'''-{idx+1:05d}-of-???.bin''' ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'total_size': total_size}
    index = {'metadata': metadata, 'weight_map': weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , 'w' , encoding='utf-8' ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + '\n'
        f.write(content )
    return metadata, index
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
_lowerCamelCase : List[str] = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path) | 686 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ : Tuple = logging.get_logger(__name__)
def squared_euclidean_distance(a , b ) -> np.ndarray:
    '''simple docstring'''
    b = b.T
    aa = np.sum(np.square(a ) , axis=1 )
    ba = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
def color_quantize(x , clusters ) -> np.ndarray:
    '''simple docstring'''
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
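# Tiny illustrative check (not part of the processor): with clusters at pure red
# and pure blue, a reddish pixel maps to cluster 0 and a bluish one to cluster 1.
def _quantize_example() -> None:
    clusters = np.array([[255, 0, 0], [0, 0, 255]] , dtype=np.float32 )
    pixels = np.array([[[250, 10, 10], [5, 5, 240]]] , dtype=np.float32 )  # shape (1, 2, 3)
    assert color_quantize(pixels , clusters ).tolist() == [0, 1]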
class __snake_case ( BaseImageProcessor ):
'''simple docstring'''
    model_input_names = ['''pixel_values''']
def __init__( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = True , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
snake_case__ : int = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
snake_case__ : Optional[Any] = get_size_dict(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = np.array(__SCREAMING_SNAKE_CASE ) if clusters is not None else None
snake_case__ : Tuple = do_resize
snake_case__ : int = size
snake_case__ : int = resample
snake_case__ : List[Any] = do_normalize
snake_case__ : Any = do_color_quantize
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
snake_case__ : Any = get_size_dict(__SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}" )
return resize(
__SCREAMING_SNAKE_CASE , size=(size["""height"""], size["""width"""]) , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , ):
snake_case__ : Any = rescale(image=__SCREAMING_SNAKE_CASE , scale=1 / 127.5 , data_format=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = image - 1
return image
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE , ):
snake_case__ : str = do_resize if do_resize is not None else self.do_resize
snake_case__ : List[str] = size if size is not None else self.size
snake_case__ : int = get_size_dict(__SCREAMING_SNAKE_CASE )
snake_case__ : str = resample if resample is not None else self.resample
snake_case__ : str = do_normalize if do_normalize is not None else self.do_normalize
snake_case__ : Optional[Any] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
snake_case__ : Any = clusters if clusters is not None else self.clusters
snake_case__ : List[Any] = np.array(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = make_list_of_images(__SCREAMING_SNAKE_CASE )
if not valid_images(__SCREAMING_SNAKE_CASE ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_color_quantize and clusters is None:
raise ValueError("""Clusters must be specified if do_color_quantize is True.""" )
# All transformations expect numpy arrays.
snake_case__ : Union[str, Any] = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
snake_case__ : Union[str, Any] = [self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
snake_case__ : List[Any] = [self.normalize(image=__SCREAMING_SNAKE_CASE ) for image in images]
if do_color_quantize:
snake_case__ : int = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
snake_case__ : Optional[Any] = np.array(__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = color_quantize(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
snake_case__ : List[Any] = images.shape[0]
snake_case__ : List[str] = images.reshape(__SCREAMING_SNAKE_CASE , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
snake_case__ : Optional[int] = list(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : Dict = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images]
snake_case__ : Optional[int] = {"""input_ids""": images}
return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
| 38 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_CITATION = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each prediction should be an integer label (or a float for STS-B).
    references: list of references, one reference per prediction.
        Each reference should be an integer label (or a float for STS-B).
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
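

# Illustrative sanity check (added; not part of the original module): with
# perfect predictions, accuracy and F1 both come out as 1.0.
def _demo_glue_helpers():
    import numpy as np

    preds = np.array([0, 1, 1, 0])
    labels = np.array([0, 1, 1, 0])
    assert simple_accuracy(preds, labels) == 1.0
    assert acc_and_f1(preds, labels) == {"accuracy": 1.0, "f1": 1.0}
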
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            ) | 686 | 0 |
import argparse
import struct
import unittest
class SHA256:
    """Computes the SHA-256 hash of a bytestring, following FIPS 180-4."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values: fractional parts of the square roots of the
        # first 8 primes
        self.hashes = [
            0x6A09E667,
            0xBB67AE85,
            0x3C6EF372,
            0xA54FF53A,
            0x510E527F,
            0x9B05688C,
            0x1F83D9AB,
            0x5BE0CD19,
        ]
        # Initialize round constants: fractional parts of the cube roots of the
        # first 64 primes
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        # Pad with 0x80, zero bytes, and the 8-byte big-endian bit length so
        # the total length becomes a multiple of 64 bytes.
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
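
    # Worked example (added): preprocessing(b"abc") returns 64 bytes —
    # 3 message bytes + 0x80 + 52 zero bytes + an 8-byte length field of
    # 0x18 (= 24 bits).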
    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4-byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 zero-ed integers for the rest of the message schedule
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer `value` by `rotations` bit positions."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
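

# Minimal usage sketch (added; mirrors the unit test below): hash a bytestring
# and compare against hashlib's reference implementation.
def demo_sha256() -> None:
    import hashlib

    message = b"abc"
    assert SHA256(message).hash == hashlib.sha256(message).hexdigest()
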
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main() | 39 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
            )

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        """Called to initialize data. Use the call to construct features."""
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info('Loading features from cached file %s', cached_features_file)
            else:
                logger.info('Creating features from dataset file at %s', args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info('Saving features into cached file %s', cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        """Load datasets. Called after prepare_data."""
        # We evaluate on the dev set to compare against benchmarks without a test-set submission
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info('Loading features from cached file %s', cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )
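
    # Note (added): TensorDataset packs tensors as (input_ids, attention_mask,
    # token_type_ids, labels), which is why the step methods read labels from
    # batch[3] and token_type_ids from batch[2].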

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
            )

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--task", default="", type=str, required=True, help="The GLUE task to run")
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
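

# Usage sketch (added; argument values are hypothetical): the module can also
# be constructed from a plain dict, which `__init__` converts to a Namespace;
# the remaining expected hyperparameters come from `BaseTransformer`:
#     model = GLUETransformer({"task": "mrpc", ...})
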
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir is not provided, a folder will be generated in the working directory
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on the dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main() | 686 | 0 |